author    Ralf Baechle <ralf@linux-mips.org>  1999-06-13 16:29:25 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1999-06-13 16:29:25 +0000
commit    db7d4daea91e105e3859cf461d7e53b9b77454b2 (patch)
tree      9bb65b95440af09e8aca63abe56970dd3360cc57 /net
parent    9c1c01ead627bdda9211c9abd5b758d6c687d8ac (diff)
Merge with Linux 2.2.8.
Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c | 22
-rw-r--r--  net/Config.in | 1
-rw-r--r--  net/Makefile | 4
-rw-r--r--  net/README | 2
-rw-r--r--  net/TUNABLE | 12
-rw-r--r--  net/appletalk/aarp.c | 26
-rw-r--r--  net/appletalk/ddp.c | 8
-rw-r--r--  net/ax25/af_ax25.c | 2
-rw-r--r--  net/bridge/br.c | 55
-rw-r--r--  net/bridge/br_tree.c | 24
-rw-r--r--  net/core/dev.c | 48
-rw-r--r--  net/core/dev_mcast.c | 60
-rw-r--r--  net/core/filter.c | 238
-rw-r--r--  net/core/iovec.c | 9
-rw-r--r--  net/core/neighbour.c | 123
-rw-r--r--  net/core/rtnetlink.c | 2
-rw-r--r--  net/core/scm.c | 11
-rw-r--r--  net/core/skbuff.c | 8
-rw-r--r--  net/core/sock.c | 65
-rw-r--r--  net/econet/econet.c | 1
-rw-r--r--  net/ipv4/Config.in | 4
-rw-r--r--  net/ipv4/af_inet.c | 71
-rw-r--r--  net/ipv4/arp.c | 25
-rw-r--r--  net/ipv4/devinet.c | 29
-rw-r--r--  net/ipv4/fib_frontend.c | 9
-rw-r--r--  net/ipv4/fib_hash.c | 98
-rw-r--r--  net/ipv4/fib_rules.c | 38
-rw-r--r--  net/ipv4/fib_semantics.c | 6
-rw-r--r--  net/ipv4/icmp.c | 34
-rw-r--r--  net/ipv4/igmp.c | 10
-rw-r--r--  net/ipv4/ip_forward.c | 4
-rw-r--r--  net/ipv4/ip_fragment.c | 5
-rw-r--r--  net/ipv4/ip_fw.c | 6
-rw-r--r--  net/ipv4/ip_gre.c | 105
-rw-r--r--  net/ipv4/ip_input.c | 112
-rw-r--r--  net/ipv4/ip_masq.c | 74
-rw-r--r--  net/ipv4/ip_masq_mfw.c | 2
-rw-r--r--  net/ipv4/ip_nat_dumb.c | 33
-rw-r--r--  net/ipv4/ip_options.c | 30
-rw-r--r--  net/ipv4/ip_output.c | 171
-rw-r--r--  net/ipv4/ip_sockglue.c | 23
-rw-r--r--  net/ipv4/ipconfig.c | 6
-rw-r--r--  net/ipv4/ipip.c | 106
-rw-r--r--  net/ipv4/ipmr.c | 17
-rw-r--r--  net/ipv4/proc.c | 4
-rw-r--r--  net/ipv4/route.c | 472
-rw-r--r--  net/ipv4/syncookies.c | 5
-rw-r--r--  net/ipv4/tcp.c | 82
-rw-r--r--  net/ipv4/tcp_input.c | 67
-rw-r--r--  net/ipv4/tcp_ipv4.c | 121
-rw-r--r--  net/ipv4/tcp_output.c | 80
-rw-r--r--  net/ipv4/tcp_timer.c | 62
-rw-r--r--  net/ipv4/timer.c | 5
-rw-r--r--  net/ipv4/udp.c | 29
-rw-r--r--  net/ipv6/Makefile | 3
-rw-r--r--  net/ipv6/addrconf.c | 210
-rw-r--r--  net/ipv6/af_inet6.c | 24
-rw-r--r--  net/ipv6/datagram.c | 47
-rw-r--r--  net/ipv6/icmp.c | 8
-rw-r--r--  net/ipv6/ip6_fib.c | 14
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 620
-rw-r--r--  net/ipv6/ip6_output.c | 48
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 36
-rw-r--r--  net/ipv6/mcast.c | 28
-rw-r--r--  net/ipv6/ndisc.c | 18
-rw-r--r--  net/ipv6/raw.c | 46
-rw-r--r--  net/ipv6/route.c | 61
-rw-r--r--  net/ipv6/sit.c | 103
-rw-r--r--  net/ipv6/tcp_ipv6.c | 136
-rw-r--r--  net/ipv6/udp.c | 143
-rw-r--r--  net/ipx/Makefile | 8
-rw-r--r--  net/ipx/af_ipx.c | 45
-rw-r--r--  net/irda/Config.in | 2
-rw-r--r--  net/irda/Makefile | 20
-rw-r--r--  net/irda/af_irda.c | 1289
-rw-r--r--  net/irda/discovery.c | 245
-rw-r--r--  net/irda/ircomm/Makefile | 8
-rw-r--r--  net/irda/ircomm/attach.c | 364
-rw-r--r--  net/irda/ircomm/ircomm_common.c | 1489
-rw-r--r--  net/irda/ircomm/irvtd.c | 153
-rw-r--r--  net/irda/ircomm/irvtd_driver.c | 1883
-rw-r--r--  net/irda/irda_device.c | 412
-rw-r--r--  net/irda/iriap.c | 460
-rw-r--r--  net/irda/iriap_event.c | 11
-rw-r--r--  net/irda/irias_object.c | 5
-rw-r--r--  net/irda/irlan/Config.in | 6
-rw-r--r--  net/irda/irlan/Makefile | 33
-rw-r--r--  net/irda/irlan/irlan_cli.c | 676
-rw-r--r--  net/irda/irlan/irlan_cli_event.c | 494
-rw-r--r--  net/irda/irlan/irlan_client.c | 589
-rw-r--r--  net/irda/irlan/irlan_client_event.c | 527
-rw-r--r--  net/irda/irlan/irlan_common.c | 1134
-rw-r--r--  net/irda/irlan/irlan_eth.c | 296
-rw-r--r--  net/irda/irlan/irlan_event.c | 20
-rw-r--r--  net/irda/irlan/irlan_filter.c | 235
-rw-r--r--  net/irda/irlan/irlan_provider.c | 425
-rw-r--r--  net/irda/irlan/irlan_provider_event.c | 247
-rw-r--r--  net/irda/irlan/irlan_srv.c | 915
-rw-r--r--  net/irda/irlan/irlan_srv_event.c | 268
-rw-r--r--  net/irda/irlap.c | 312
-rw-r--r--  net/irda/irlap_comp.c | 23
-rw-r--r--  net/irda/irlap_event.c | 857
-rw-r--r--  net/irda/irlap_frame.c | 746
-rw-r--r--  net/irda/irlmp.c | 1094
-rw-r--r--  net/irda/irlmp_event.c | 366
-rw-r--r--  net/irda/irlmp_frame.c | 243
-rw-r--r--  net/irda/irlpt/irlpt_cli.c | 283
-rw-r--r--  net/irda/irlpt/irlpt_cli_fsm.c | 177
-rw-r--r--  net/irda/irlpt/irlpt_common.c | 190
-rw-r--r--  net/irda/irlpt/irlpt_srvr.c | 202
-rw-r--r--  net/irda/irlpt/irlpt_srvr_fsm.c | 37
-rw-r--r--  net/irda/irmod.c | 203
-rw-r--r--  net/irda/irobex/Config.in | 3
-rw-r--r--  net/irda/irobex/Makefile | 19
-rw-r--r--  net/irda/irobex/irobex.c | 1119
-rw-r--r--  net/irda/irproc.c | 355
-rw-r--r--  net/irda/irqueue.c | 45
-rw-r--r--  net/irda/irsysctl.c | 17
-rw-r--r--  net/irda/irttp.c | 1142
-rw-r--r--  net/irda/qos.c | 17
-rw-r--r--  net/irda/timer.c | 15
-rw-r--r--  net/irda/wrapper.c | 197
-rw-r--r--  net/netlink/af_netlink.c | 43
-rw-r--r--  net/netsyms.c | 25
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/sched/Config.in | 7
-rw-r--r--  net/sched/Makefile | 10
-rw-r--r--  net/sched/cls_api.c | 58
-rw-r--r--  net/sched/cls_fw.c | 305
-rw-r--r--  net/sched/cls_route.c | 594
-rw-r--r--  net/sched/cls_rsvp.h | 81
-rw-r--r--  net/sched/cls_u32.c | 59
-rw-r--r--  net/sched/estimator.c | 4
-rw-r--r--  net/sched/police.c | 61
-rw-r--r--  net/sched/sch_api.c | 447
-rw-r--r--  net/sched/sch_cbq.c | 335
-rw-r--r--  net/sched/sch_csz.c | 17
-rw-r--r--  net/sched/sch_fifo.c | 14
-rw-r--r--  net/sched/sch_generic.c | 39
-rw-r--r--  net/sched/sch_prio.c | 120
-rw-r--r--  net/sched/sch_red.c | 4
-rw-r--r--  net/sched/sch_sfq.c | 40
-rw-r--r--  net/sched/sch_tbf.c | 100
-rw-r--r--  net/sched/sch_teql.c | 1
-rw-r--r--  net/socket.c | 56
-rw-r--r--  net/sunrpc/auth.c | 3
-rw-r--r--  net/sunrpc/auth_null.c | 5
-rw-r--r--  net/sunrpc/auth_unix.c | 4
-rw-r--r--  net/sunrpc/sched.c | 11
-rw-r--r--  net/sunrpc/svcsock.c | 25
-rw-r--r--  net/sunrpc/xprt.c | 7
-rw-r--r--  net/unix/af_unix.c | 95
-rw-r--r--  net/unix/garbage.c | 68
-rw-r--r--  net/unix/sysctl_net_unix.c | 4
154 files changed, 15141 insertions, 11637 deletions
diff --git a/net/802/tr.c b/net/802/tr.c
index a8f77970e..9047eaa49 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -30,7 +30,6 @@
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <linux/string.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
@@ -68,6 +67,8 @@ struct rif_cache_s {
rif_cache rif_table[RIF_TABLE_SIZE]={ NULL, };
+static spinlock_t rif_lock = SPIN_LOCK_UNLOCKED;
+
#define RIF_TIMEOUT 60*10*HZ
#define RIF_CHECK_INTERVAL 60*HZ
@@ -230,6 +231,9 @@ static void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct devic
unsigned int hash;
rif_cache entry;
unsigned char *olddata;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rif_lock, flags);
/*
* Broadcasts are single route as stated in RFC 1042
@@ -298,6 +302,8 @@ printk("source routing for %02X %02X %02X %02X %02X %02X\n",trh->daddr[0],
else
slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
olddata = skb->data;
+ spin_unlock_irqrestore(&rif_lock, flags);
+
skb_pull(skb, slack);
memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}
@@ -312,7 +318,11 @@ static void tr_add_rif_info(struct trh_hdr *trh, struct device *dev)
int i;
unsigned int hash, rii_p = 0;
rif_cache entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rif_lock, flags);
+
/*
* Firstly see if the entry exists
*/
@@ -350,6 +360,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
if(!entry)
{
printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
+ spin_unlock_irqrestore(&rif_lock, flags);
return;
}
@@ -391,6 +402,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
}
entry->last_used=jiffies;
}
+ spin_unlock_irqrestore(&rif_lock, flags);
}
/*
@@ -402,9 +414,8 @@ static void rif_check_expire(unsigned long dummy)
int i;
unsigned long now=jiffies,flags;
- save_flags(flags);
- cli();
-
+ spin_lock_irqsave(&rif_lock, flags);
+
for(i=0; i < RIF_TABLE_SIZE;i++)
{
rif_cache entry, *pentry=rif_table+i;
@@ -422,7 +433,8 @@ static void rif_check_expire(unsigned long dummy)
pentry=&entry->next;
}
}
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&rif_lock, flags);
/*
* Reset the timer
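
The tr.c hunks above convert the RIF cache from global save_flags()/cli() interrupt disabling to a dedicated rif_lock spinlock, releasing it on every early return. A minimal sketch of that conversion pattern follows; the cache_lock/cache_entry names are hypothetical and not code from this patch:

    #include <asm/spinlock.h>
    #include <linux/sched.h>                            /* jiffies */

    struct cache_entry { unsigned long last_used; };

    static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED;  /* 2.2-era static initializer */

    static void cache_touch(struct cache_entry *e)
    {
            unsigned long flags;

            /* was: save_flags(flags); cli(); ... restore_flags(flags); */
            spin_lock_irqsave(&cache_lock, flags);       /* take the lock, disable local irqs */
            e->last_used = jiffies;
            spin_unlock_irqrestore(&cache_lock, flags);
    }
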
diff --git a/net/Config.in b/net/Config.in
index ae9abe2e5..ed8510209 100644
--- a/net/Config.in
+++ b/net/Config.in
@@ -10,7 +10,6 @@ if [ "$CONFIG_NETLINK" = "y" ]; then
tristate 'Netlink device emulation' CONFIG_NETLINK_DEV
fi
bool 'Network firewalls' CONFIG_FIREWALL
-bool 'Network aliasing' CONFIG_NET_ALIAS
bool 'Socket Filtering' CONFIG_FILTER
tristate 'Unix domain sockets' CONFIG_UNIX
bool 'TCP/IP networking' CONFIG_INET
diff --git a/net/Makefile b/net/Makefile
index 9131266d1..d20953259 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -63,6 +63,8 @@ endif
ifeq ($(CONFIG_IPX),y)
SUB_DIRS += ipx
+# SPX can be still a module
+MOD_SUB_DIRS += ipx
else
ifeq ($(CONFIG_IPX),m)
MOD_SUB_DIRS += ipx
@@ -127,6 +129,8 @@ endif
ifeq ($(CONFIG_IRDA),y)
SUB_DIRS += irda
+# There might be some irda features that are compiled as modules
+MOD_IN_SUB_DIRS += irda
else
ifeq ($(CONFIG_IRDA),m)
MOD_SUB_DIRS += irda
diff --git a/net/README b/net/README
index 9281cc13d..6f1b0beb8 100644
--- a/net/README
+++ b/net/README
@@ -16,7 +16,7 @@ ipx/spx Jay.Schulist@spacs.k12.wi.us
lapb g4klx@g4klx.demon.co.uk
netrom g4klx@g4klx.demon.co.uk
rose g4klx@g4klx.demon.co.uk
-wanrouter genek@compuserve.com and dm@sangoma.com
+wanrouter gene@compuserve.com, jaspreet@sangoma and dm@sangoma.com
unix alan@lxorguk.ukuu.org.uk
x25 g4klx@g4klx.demon.co.uk
diff --git a/net/TUNABLE b/net/TUNABLE
index b853cc42b..9913211f0 100644
--- a/net/TUNABLE
+++ b/net/TUNABLE
@@ -5,10 +5,7 @@ This is far from complete
Item Description
----------------------------------------------------------------------------
-MAX_SOCKETS Tunable on boot, maximum sockets we will allocate
-NUM_PROTO Maximum loadable address family, will need recompile
MAX_LINKS Maximum number of netlink minor devices. (1-32)
-MAX_QBYTES Size of a netlink device queue (tunable)
RIF_TABLE_SIZE Token ring RIF cache size (tunable)
AARP_HASH_SIZE Size of Appletalk hash table (tunable)
AX25_DEF_T1 AX.25 parameters. These are all tunable via
@@ -34,18 +31,9 @@ MAX_WINDOW Offered maximum window (tunable)
MAX_HEADER Largest physical header (tunable)
MAX_ADDR_LEN Largest physical address (tunable)
SOCK_ARRAY_SIZE IP socket array hash size (tunable)
-ARP_RES_TIME Time we try to resolve (tunable)
-ARP_DEAD_RES_TIME Time the entry stays dead (tunable)
-ARP_MAX_TRIES Maximum tries (tunable)
-ARP_TIMEOUT Timeout on an ARP (tunable)
-ARP_CHECK_INTERVAL Check interval to refresh an arp (tunable)
-ARP_CONFIRM_INTERVAL Confirm poll time (tunable)
-ARP_TABLE_SIZE Hash table size for ARP (tunable)
IP_MAX_MEMBERSHIPS Largest number of groups per socket (BSD style) (tunable)
16 Hard coded constant for amount of room allowed for
cache align and faster forwarding (tunable)
-IPFRAG_HIGH_THRESH Limit on fragments, we free fragments until we reach
-IPFRAG_LOW_THRESH which provides some breathing space. (tunable)
IP_FRAG_TIME Time we hold a fragment for. (tunable)
PORT_MASQ_BEGIN First port reserved for masquerade (tunable)
PORT_MASQ_END Last port used for masquerade (tunable)
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 6d59b2338..79c6c7e45 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -128,6 +128,8 @@ static void aarp_send_query(struct aarp_entry *a)
skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length);
eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp));
+ skb->protocol = htons(ETH_P_ATALK);
+ skb->nh.raw = skb->h.raw = (void *) eah;
skb->dev = dev;
/*
@@ -186,6 +188,8 @@ static void aarp_send_reply(struct device *dev, struct at_addr *us, struct at_ad
skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length);
eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp));
+ skb->protocol = htons(ETH_P_ATALK);
+ skb->nh.raw = skb->h.raw = (void *) eah;
skb->dev = dev;
/*
@@ -246,7 +250,8 @@ void aarp_send_probe(struct device *dev, struct at_addr *us)
skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length);
eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp));
-
+ skb->protocol = htons(ETH_P_ATALK);
+ skb->nh.raw = skb->h.raw = (void *) eah;
skb->dev = dev;
/*
@@ -365,12 +370,10 @@ static void aarp_expire_timeout(unsigned long unused)
aarp_expire_timer(&unresolved[ct]);
aarp_expire_timer(&proxies[ct]);
}
- del_timer(&aarp_timer);
- if(unresolved_count==0)
- aarp_timer.expires=jiffies+sysctl_aarp_expiry_time;
- else
- aarp_timer.expires=jiffies+sysctl_aarp_tick_time;
- add_timer(&aarp_timer);
+
+ mod_timer(&aarp_timer, jiffies +
+ (unresolved_count ? sysctl_aarp_tick_time:
+ sysctl_aarp_expiry_time));
}
/*
@@ -750,9 +753,7 @@ int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, vo
if(unresolved_count==1)
{
- del_timer(&aarp_timer);
- aarp_timer.expires=jiffies+sysctl_aarp_tick_time;
- add_timer(&aarp_timer);
+ mod_timer(&aarp_timer, jiffies + sysctl_aarp_tick_time);
}
/*
@@ -939,9 +940,8 @@ static int aarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type
aarp_resolved(&unresolved[hash],a,hash);
if(unresolved_count==0)
{
- del_timer(&aarp_timer);
- aarp_timer.expires=jiffies+sysctl_aarp_expiry_time;
- add_timer(&aarp_timer);
+ mod_timer(&aarp_timer, jiffies +
+ sysctl_aarp_expiry_time);
}
break;
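
The aarp.c hunks above collapse each del_timer()/set-expires/add_timer() sequence into a single mod_timer() call, which re-arms a pending (or already expired) timer atomically. A hedged sketch of the pattern, with hypothetical timer and interval names:

    #include <linux/timer.h>
    #include <linux/sched.h>                    /* jiffies, HZ */

    #define FAST_TICK   (HZ / 5)                /* illustrative intervals only */
    #define SLOW_EXPIRY (5 * 60 * HZ)

    static struct timer_list expire_timer;

    static void rearm_expire_timer(int have_pending_work)
    {
            /* was: del_timer(&expire_timer);
             *      expire_timer.expires = jiffies + interval;
             *      add_timer(&expire_timer);
             */
            mod_timer(&expire_timer, jiffies +
                      (have_pending_work ? FAST_TICK : SLOW_EXPIRY));
    }
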
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 735825842..4c8c6e390 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -356,7 +356,7 @@ static int atif_probe_device(struct atalk_iface *atif)
/*
* Scan the networks.
*/
-
+ atif->status |= ATIF_PROBE;
for(netct = 0; netct <= netrange; netct++)
{
/*
@@ -374,8 +374,10 @@ static int atif_probe_device(struct atalk_iface *atif)
*/
aarp_probe_network(atif);
- if(!(atif->status & ATIF_PROBE_FAIL))
+ if(!(atif->status & ATIF_PROBE_FAIL)) {
+ atif->status &= ~ATIF_PROBE;
return (0);
+ }
}
atif->status &= ~ATIF_PROBE_FAIL;
}
@@ -383,7 +385,7 @@ static int atif_probe_device(struct atalk_iface *atif)
if(probe_net > ntohs(atif->nets.nr_lastnet))
probe_net = ntohs(atif->nets.nr_firstnet);
}
-
+ atif->status &= ~ATIF_PROBE;
return (-EADDRINUSE); /* Network is full... */
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f9392fe34..a2ad7111f 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -957,7 +957,7 @@ static int ax25_release(struct socket *sock, struct socket *peer)
ax25_stop_t3timer(sk->protinfo.ax25);
ax25_stop_idletimer(sk->protinfo.ax25);
break;
-#ifdef AX25_CONFIG_DAMA_SLAVE
+#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_stop_t3timer(sk->protinfo.ax25);
ax25_stop_idletimer(sk->protinfo.ax25);
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 204b00ca0..996aeb718 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -6,7 +6,7 @@
* More hacks to be able to switch protocols on and off by Christoph Lameter
* <clameter@debian.org>
* Software and more Documentation for the bridge is available from ftp.debian.org
- * in the bridge package or at ftp.fuller.edu/Linux/bridge
+ * in the bridgex package
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -63,7 +63,6 @@
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
@@ -1239,6 +1238,13 @@ static int send_config_bpdu(int port_no, Config_bpdu *config_bpdu)
struct sk_buff *skb;
/*
+ * Keep silent when disabled or when STP disabled
+ */
+
+ if(!(br_stats.flags & BR_UP) || (br_stats.flags & BR_STP_DISABLED))
+ return -1;
+
+ /*
* Create and send the message
*/
@@ -1270,6 +1276,14 @@ static int send_config_bpdu(int port_no, Config_bpdu *config_bpdu)
static int send_tcn_bpdu(int port_no, Tcn_bpdu *bpdu)
{
struct sk_buff *skb;
+
+ /*
+ * Keep silent when disabled or when STP disabled
+ */
+
+ if(!(br_stats.flags & BR_UP) || (br_stats.flags & BR_STP_DISABLED))
+ return -1;
+
skb = alloc_bridge_skb(port_no, sizeof(Tcn_bpdu), "tcn");
if (skb == NULL)
@@ -1336,13 +1350,25 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
enable_port(i);
set_path_cost(i, br_port_cost(dev));
set_port_priority(i);
- make_forwarding(i);
+ if (br_stats.flags & BR_STP_DISABLED)
+ port_info[i].state = Forwarding;
+ else
+ make_forwarding(i);
}
return NOTIFY_DONE;
break;
}
}
break;
+ case NETDEV_UNREGISTER:
+ if (br_stats.flags & BR_DEBUG)
+ printk(KERN_DEBUG "br_device_event: NETDEV_UNREGISTER...\n");
+ i = find_port(dev);
+ if (i > 0) {
+ br_avl_delete_by_port(i);
+ port_info[i].dev = NULL;
+ }
+ break;
}
return NOTIFY_DONE;
}
@@ -1833,6 +1859,8 @@ static int find_port(struct device *dev)
static int br_port_cost(struct device *dev) /* 4.10.2 */
{
+ if (strncmp(dev->name, "lec", 3) == 0) /* ATM Lan Emulation (LANE) */
+ return(7); /* 155 Mbs */
if (strncmp(dev->name, "eth", 3) == 0) /* ethernet */
return(100);
if (strncmp(dev->name, "plip",4) == 0) /* plip */
@@ -1850,7 +1878,8 @@ static void br_bpdu(struct sk_buff *skb, int port) /* consumes skb */
Tcn_bpdu *bpdu = (Tcn_bpdu *) (bufp + BRIDGE_LLC1_HS);
Config_bpdu rcv_bpdu;
- if((*bufp++ == BRIDGE_LLC1_DSAP) && (*bufp++ == BRIDGE_LLC1_SSAP) &&
+ if(!(br_stats.flags & BR_STP_DISABLED) &&
+ (*bufp++ == BRIDGE_LLC1_DSAP) && (*bufp++ == BRIDGE_LLC1_SSAP) &&
(*bufp++ == BRIDGE_LLC1_CTRL) &&
(bpdu->protocol_id == BRIDGE_BPDU_8021_PROTOCOL_ID) &&
(bpdu->protocol_version_id == BRIDGE_BPDU_8021_PROTOCOL_VERSION_ID))
@@ -1970,6 +1999,10 @@ int br_ioctl(unsigned int cmd, void *arg)
}
}
port_state_selection(); /* (4.8.1.5) */
+ if (br_stats.flags & BR_STP_DISABLED)
+ for(i=One;i<=No_of_ports; i++)
+ if((user_port_state[i] != Disabled) && port_info[i].dev)
+ port_info[i].state = Forwarding;
config_bpdu_generation(); /* (4.8.1.6) */
/* initialize system timer */
tl.expires = jiffies+HZ; /* 1 second */
@@ -1987,6 +2020,20 @@ int br_ioctl(unsigned int cmd, void *arg)
if (port_info[i].state != Disabled)
disable_port(i);
break;
+ case BRCMD_TOGGLE_STP:
+ printk(KERN_DEBUG "br: %s spanning tree protcol\n",
+ (br_stats.flags & BR_STP_DISABLED) ? "enabling" : "disabling");
+ if (br_stats.flags & BR_STP_DISABLED) { /* enable STP */
+ for(i=One;i<=No_of_ports; i++)
+ if((user_port_state[i] != Disabled) && port_info[i].dev)
+ enable_port(i);
+ } else { /* STP was enabled, now disable it */
+ for (i = One; i <= No_of_ports; i++)
+ if (port_info[i].state != Disabled && port_info[i].dev)
+ port_info[i].state = Forwarding;
+ }
+ br_stats.flags ^= BR_STP_DISABLED;
+ break;
case BRCMD_PORT_ENABLE:
if (port_info[bcf.arg1].dev == 0)
return(-EINVAL);
diff --git a/net/bridge/br_tree.c b/net/bridge/br_tree.c
index 0936a0f8b..c1ed82f10 100644
--- a/net/bridge/br_tree.c
+++ b/net/bridge/br_tree.c
@@ -474,3 +474,27 @@ void sprintf_avl (char **pbuffer, struct fdb * tree, off_t *pos,
return;
}
+
+/*
+ * Delete all nodes learnt by the port
+ */
+void br_avl_delete_by_port(int port)
+{
+ struct fdb *fdb, *next;
+
+ if (!fdb_inited)
+ fdb_init();
+
+ for(fdb = port_info[port].fdb; fdb != NULL; fdb = next) {
+ next = fdb->fdb_next;
+ br_avl_remove(fdb);
+ }
+ port_info[port].fdb = NULL;
+
+ /* remove the local mac too */
+ next = br_avl_find_addr(port_info[port].dev->dev_addr);
+ if (next != NULL)
+ br_avl_remove(next);
+
+ return;
+}
diff --git a/net/core/dev.c b/net/core/dev.c
index 95178439b..921f05470 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -233,6 +233,7 @@ void dev_remove_pack(struct packet_type *pt)
if(pt==(*pt1))
{
*pt1=pt->next;
+ synchronize_bh();
#ifdef CONFIG_NET_FASTROUTE
if (pt->data)
netdev_fastroute_obstacles--;
@@ -328,6 +329,12 @@ struct device *dev_alloc(const char *name, int *err)
return dev;
}
+void netdev_state_change(struct device *dev)
+{
+ if (dev->flags&IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
+}
+
/*
* Find and possibly load an interface.
@@ -422,7 +429,7 @@ static __inline__ void dev_do_clear_fastroute(struct device *dev)
int i;
for (i=0; i<=NETDEV_FASTROUTE_HMASK; i++)
- dst_release(xchg(dev->fastpath+i, NULL));
+ dst_release_irqwait(xchg(dev->fastpath+i, NULL));
}
}
@@ -895,22 +902,6 @@ void net_bh(void)
#endif
/*
- * Fetch the packet protocol ID.
- */
-
- type = skb->protocol;
-
-
-#ifdef CONFIG_BRIDGE
- /*
- * If we are bridging then pass the frame up to the
- * bridging code (if this protocol is to be bridged).
- * If it is bridged then move on
- */
- handle_bridge(skb, type);
-#endif
-
- /*
* Bump the pointer to the next structure.
*
* On entry to the protocol layer. skb->data and
@@ -927,11 +918,26 @@ void net_bh(void)
}
/*
+ * Fetch the packet protocol ID.
+ */
+
+ type = skb->protocol;
+
+#ifdef CONFIG_BRIDGE
+ /*
+ * If we are bridging then pass the frame up to the
+ * bridging code (if this protocol is to be bridged).
+ * If it is bridged then move on
+ */
+ handle_bridge(skb, type);
+#endif
+
+ /*
* We got a packet ID. Now loop over the "known protocols"
* list. There are two lists. The ptype_all list of taps (normally empty)
* and the main protocol list which is hashed perfectly for normal protocols.
*/
-
+
pt_prev = NULL;
for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
{
@@ -1536,8 +1542,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
return 0;
case SIOCSIFTXQLEN:
- /* Why <2? 0 and 1 are valid values. --ANK (980807) */
- if(/*ifr->ifr_qlen<2 ||*/ ifr->ifr_qlen>1024)
+ if(ifr->ifr_qlen<0)
return -EINVAL;
dev->tx_queue_len = ifr->ifr_qlen;
return 0;
@@ -1818,7 +1823,9 @@ int unregister_netdevice(struct device *dev)
for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
if (d == dev) {
*dp = d->next;
+ synchronize_bh();
d->next = NULL;
+
if (dev->destructor)
dev->destructor(dev);
return 0;
@@ -1978,6 +1985,7 @@ __initfunc(int net_dev_init(void))
* It failed to come up. Unhook it.
*/
*dp = dev->next;
+ synchronize_bh();
}
else
{
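
Several dev.c hunks above insert synchronize_bh() immediately after unlinking an entry from a list that net_bh() walks: unlink first so new bottom-half passes can no longer find the node, wait for any pass already in flight, and only then free or reuse the memory. A rough sketch of the idiom; the node type is hypothetical (the real lists are the packet_type and device chains):

    #include <linux/malloc.h>                   /* kfree() on 2.2 */
    #include <linux/interrupt.h>                /* assumed to provide synchronize_bh() here */

    struct node {
            struct node *next;
    };

    static struct node *node_list;              /* traversed from bottom-half context */

    static void node_unlink_and_free(struct node *victim)
    {
            struct node **np;

            for (np = &node_list; *np; np = &(*np)->next) {
                    if (*np == victim) {
                            *np = victim->next;     /* new readers can no longer reach it */
                            synchronize_bh();       /* wait out readers that already did  */
                            kfree(victim);
                            return;
                    }
            }
    }
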
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index a724497e0..bce3f4a4a 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -56,10 +56,9 @@
* protocols without doing damage to the protocols when it deletes the
* entries. It also helps IP as it tracks overlapping maps.
*
- * BUGGGG! IPv6 calls dev_mac_add/delete from BH, it means
- * that all the functions in this file are racy. [NOT FIXED] --ANK
+ * Device mc lists are changed by bh at least if IPv6 is enabled,
+ * so that it must be bh protected.
*/
-
/*
* Update the multicast list into the physical NIC controller.
@@ -77,11 +76,13 @@ void dev_mc_upload(struct device *dev)
/*
* Devices with no set multicast don't get set
*/
-
+
if(dev->set_multicast_list==NULL)
return;
-
+
+ start_bh_atomic();
dev->set_multicast_list(dev);
+ end_bh_atomic();
}
/*
@@ -90,8 +91,10 @@ void dev_mc_upload(struct device *dev)
int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
{
+ int err = 0;
struct dev_mc_list *dmi, **dmip;
+ start_bh_atomic();
for (dmip=&dev->mc_list; (dmi=*dmip)!=NULL; dmip=&dmi->next) {
/*
* Find the entry we want to delete. The device could
@@ -102,10 +105,10 @@ int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
int old_glbl = dmi->dmi_gusers;
dmi->dmi_gusers = 0;
if (old_glbl == 0)
- return -ENOENT;
+ break;
}
if(--dmi->dmi_users)
- return 0;
+ goto done;
/*
* Last user. So delete the entry.
@@ -117,11 +120,15 @@ int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
* We have altered the list, so the card
* loaded filter is now wrong. Fix it
*/
+ end_bh_atomic();
dev_mc_upload(dev);
return 0;
}
}
- return -ENOENT;
+ err = -ENOENT;
+done:
+ end_bh_atomic();
+ return err;
}
/*
@@ -130,30 +137,27 @@ int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
int dev_mc_add(struct device *dev, void *addr, int alen, int glbl)
{
- struct dev_mc_list *dmi;
+ int err = 0;
+ struct dev_mc_list *dmi, *dmi1;
+
+ dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), gfp_any());
+ start_bh_atomic();
for(dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next) {
if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && dmi->dmi_addrlen==alen) {
if (glbl) {
int old_glbl = dmi->dmi_gusers;
dmi->dmi_gusers = 1;
if (old_glbl)
- return 0;
+ goto done;
}
dmi->dmi_users++;
- return 0;
+ goto done;
}
}
- /* GFP_ATOMIC!! It is used by IPv6 from interrupt,
- when new address arrives.
-
- Particularly, it means that this part of code is weirdly
- racy, and needs numerous *_bh_atomic --ANK
- */
- dmi=(struct dev_mc_list *)kmalloc(sizeof(*dmi), GFP_ATOMIC);
- if (dmi==NULL)
- return -ENOBUFS;
+ if ((dmi=dmi1)==NULL)
+ return -ENOMEM;
memcpy(dmi->dmi_addr, addr, alen);
dmi->dmi_addrlen=alen;
dmi->next=dev->mc_list;
@@ -161,8 +165,15 @@ int dev_mc_add(struct device *dev, void *addr, int alen, int glbl)
dmi->dmi_gusers=glbl ? 1 : 0;
dev->mc_list=dmi;
dev->mc_count++;
+ end_bh_atomic();
dev_mc_upload(dev);
return 0;
+
+done:
+ end_bh_atomic();
+ if (dmi1)
+ kfree(dmi1);
+ return err;
}
/*
@@ -171,6 +182,7 @@ int dev_mc_add(struct device *dev, void *addr, int alen, int glbl)
void dev_mc_discard(struct device *dev)
{
+ start_bh_atomic();
while (dev->mc_list!=NULL) {
struct dev_mc_list *tmp=dev->mc_list;
dev->mc_list=tmp->next;
@@ -179,6 +191,7 @@ void dev_mc_discard(struct device *dev)
kfree_s(tmp,sizeof(*tmp));
}
dev->mc_count=0;
+ end_bh_atomic();
}
#ifdef CONFIG_PROC_FS
@@ -189,7 +202,9 @@ static int dev_mc_read_proc(char *buffer, char **start, off_t offset,
struct dev_mc_list *m;
int len=0;
struct device *dev;
-
+
+ start_bh_atomic();
+
for (dev = dev_base; dev; dev = dev->next) {
for (m = dev->mc_list; m; m = m->next) {
int i;
@@ -214,10 +229,13 @@ static int dev_mc_read_proc(char *buffer, char **start, off_t offset,
*eof = 1;
done:
+ end_bh_atomic();
*start=buffer+(offset-begin);
len-=(offset-begin);
if(len>length)
len=length;
+ if(len<0)
+ len=0;
return len;
}
#endif
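
dev_mc_add() above now allocates its spare entry before entering start_bh_atomic(), because the multicast list can also be modified from bottom-half context (IPv6) and no blocking allocation is allowed once the bh-atomic section has begun; if the address turns out to be present already, the spare is simply freed afterwards. The allocate-then-lock shape, reduced to a sketch with hypothetical names (assumes the usual 2.2 networking includes; gfp_any() is used as in the patch):

    struct mc_entry {
            struct mc_entry *next;
            unsigned char    addr[MAX_ADDR_LEN];
            int              users;
    };

    static int mc_add(struct mc_entry **head, void *addr, int alen)
    {
            struct mc_entry *spare, *m;

            spare = kmalloc(sizeof(*spare), gfp_any());     /* allocate before going bh-atomic */
            start_bh_atomic();
            for (m = *head; m; m = m->next) {
                    if (memcmp(m->addr, addr, alen) == 0) {
                            m->users++;                     /* already known: bump the count */
                            end_bh_atomic();
                            if (spare)
                                    kfree(spare);
                            return 0;
                    }
            }
            if (spare == NULL) {
                    end_bh_atomic();
                    return -ENOMEM;
            }
            memcpy(spare->addr, addr, alen);
            spare->users = 1;
            spare->next  = *head;
            *head = spare;
            end_bh_atomic();
            return 0;
    }
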
diff --git a/net/core/filter.c b/net/core/filter.c
index a60d8f1e5..cc1ed83cd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -11,6 +11,8 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
+ *
+ * Andi Kleen - Fix a few bad bugs and races.
*/
#include <linux/config.h>
@@ -36,6 +38,22 @@
#include <asm/uaccess.h>
#include <linux/filter.h>
+/* No hurry in this branch */
+
+static u8 *load_pointer(struct sk_buff *skb, int k)
+{
+ u8 *ptr = NULL;
+
+ if (k>=SKF_NET_OFF)
+ ptr = skb->nh.raw + k - SKF_NET_OFF;
+ else if (k>=SKF_LL_OFF)
+ ptr = skb->mac.raw + k - SKF_LL_OFF;
+
+ if (ptr<skb->head && ptr < skb->tail)
+ return ptr;
+ return NULL;
+}
+
/*
* Decode and apply filter instructions to the skb->data.
* Return length to keep, 0 for none. skb is the data we are
@@ -43,15 +61,19 @@
* len is the number of filter blocks in the array.
*/
-int sk_run_filter(unsigned char *data, int len, struct sock_filter *filter, int flen)
+int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
+ unsigned char *data = skb->data;
+ /* len is UNSIGNED. Byte wide insns relies only on implicit
+ type casts to prevent reading arbitrary memory locations.
+ */
+ unsigned int len = skb->len;
struct sock_filter *fentry; /* We walk down these */
u32 A = 0; /* Accumulator */
u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
int k;
int pc;
- int *t;
/*
* Process array of filter instructions.
@@ -60,53 +82,75 @@ int sk_run_filter(unsigned char *data, int len, struct sock_filter *filter, int
for(pc = 0; pc < flen; pc++)
{
fentry = &filter[pc];
- if(fentry->code & BPF_X)
- t=&X;
- else
- t=&fentry->k;
switch(fentry->code)
{
case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+
case BPF_ALU|BPF_ADD|BPF_K:
- A += *t;
+ A += fentry->k;
continue;
case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+
case BPF_ALU|BPF_SUB|BPF_K:
- A -= *t;
+ A -= fentry->k;
continue;
case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+
case BPF_ALU|BPF_MUL|BPF_K:
- A *= *t;
+ A *= X;
continue;
case BPF_ALU|BPF_DIV|BPF_X:
+ if(X == 0)
+ return (0);
+ A /= X;
+ continue;
+
case BPF_ALU|BPF_DIV|BPF_K:
- if(*t == 0)
+ if(fentry->k == 0)
return (0);
- A /= *t;
+ A /= fentry->k;
continue;
case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+
case BPF_ALU|BPF_AND|BPF_K:
- A &= *t;
+ A &= fentry->k;
continue;
case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+
case BPF_ALU|BPF_OR|BPF_K:
- A |= *t;
+ A |= fentry->k;
continue;
case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+
case BPF_ALU|BPF_LSH|BPF_K:
- A <<= *t;
+ A <<= fentry->k;
continue;
case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+
case BPF_ALU|BPF_RSH|BPF_K:
- A >>= *t;
+ A >>= fentry->k;
continue;
case BPF_ALU|BPF_NEG:
@@ -148,26 +192,62 @@ int sk_run_filter(unsigned char *data, int len, struct sock_filter *filter, int
case BPF_JMP|BPF_JSET|BPF_X:
pc += (A & X) ? fentry->jt : fentry->jf;
continue;
+
case BPF_LD|BPF_W|BPF_ABS:
k = fentry->k;
- if(k + sizeof(long) > len)
- return (0);
- A = ntohl(*(long*)&data[k]);
- continue;
+load_w:
+ if(k+sizeof(u32) <= len) {
+ A = ntohl(*(u32*)&data[k]);
+ continue;
+ }
+ if (k<0) {
+ u8 *ptr;
+
+ if (k>=SKF_AD_OFF)
+ break;
+ if ((ptr = load_pointer(skb, k)) != NULL) {
+ A = ntohl(*(u32*)ptr);
+ continue;
+ }
+ }
+ return 0;
case BPF_LD|BPF_H|BPF_ABS:
k = fentry->k;
- if(k + sizeof(short) > len)
- return (0);
- A = ntohs(*(short*)&data[k]);
- continue;
+load_h:
+ if(k + sizeof(u16) <= len) {
+ A = ntohs(*(u16*)&data[k]);
+ continue;
+ }
+ if (k<0) {
+ u8 *ptr;
+
+ if (k>=SKF_AD_OFF)
+ break;
+ if ((ptr = load_pointer(skb, k)) != NULL) {
+ A = ntohs(*(u16*)ptr);
+ continue;
+ }
+ }
+ return 0;
case BPF_LD|BPF_B|BPF_ABS:
k = fentry->k;
- if(k >= len)
- return (0);
- A = data[k];
- continue;
+load_b:
+ if(k < len) {
+ A = data[k];
+ continue;
+ }
+ if (k<0) {
+ u8 *ptr;
+
+ if (k>=SKF_AD_OFF)
+ break;
+ if ((ptr = load_pointer(skb, k)) != NULL) {
+ A = *ptr;
+ continue;
+ }
+ }
case BPF_LD|BPF_W|BPF_LEN:
A = len;
@@ -177,35 +257,23 @@ int sk_run_filter(unsigned char *data, int len, struct sock_filter *filter, int
X = len;
continue;
- case BPF_LD|BPF_W|BPF_IND:
+ case BPF_LD|BPF_W|BPF_IND:
k = X + fentry->k;
- if(k + sizeof(u32) > len)
- return (0);
- A = ntohl(*(u32 *)&data[k]);
- continue;
+ goto load_w;
case BPF_LD|BPF_H|BPF_IND:
k = X + fentry->k;
- if(k + sizeof(u16) > len)
- return (0);
- A = ntohs(*(u16*)&data[k]);
- continue;
+ goto load_h;
case BPF_LD|BPF_B|BPF_IND:
k = X + fentry->k;
- if(k >= len)
- return (0);
- A = data[k];
- continue;
+ goto load_b;
case BPF_LDX|BPF_B|BPF_MSH:
- /*
- * Hack for BPF to handle TOS etc
- */
k = fentry->k;
if(k >= len)
return (0);
- X = (data[fentry->k] & 0xf) << 2;
+ X = (data[k] & 0xf) << 2;
continue;
case BPF_LD|BPF_IMM:
@@ -216,7 +284,7 @@ int sk_run_filter(unsigned char *data, int len, struct sock_filter *filter, int
X = fentry->k;
continue;
- case BPF_LD|BPF_MEM:
+ case BPF_LD|BPF_MEM:
A = mem[fentry->k];
continue;
@@ -246,15 +314,29 @@ int sk_run_filter(unsigned char *data, int len, struct sock_filter *filter, int
mem[fentry->k] = X;
continue;
-
-
default:
/* Invalid instruction counts as RET */
return (0);
}
+
+ /* Handle ancillary data, which are impossible
+ (or very difficult) to get parsing packet contents.
+ */
+ switch (k-SKF_AD_OFF) {
+ case SKF_AD_PROTOCOL:
+ A = htons(skb->protocol);
+ continue;
+ case SKF_AD_PKTTYPE:
+ A = skb->pkt_type;
+ continue;
+ case SKF_AD_IFINDEX:
+ A = skb->dev->ifindex;
+ continue;
+ default:
+ return 0;
+ }
}
- printk(KERN_ERR "Filter ruleset ran off the end.\n");
return (0);
}
@@ -279,13 +361,17 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
ftest = &filter[pc];
if(BPF_CLASS(ftest->code) == BPF_JMP)
- {
+ {
/*
* But they mustn't jump off the end.
*/
if(BPF_OP(ftest->code) == BPF_JA)
{
- if(pc + ftest->k + 1>= (unsigned)flen)
+ /* Note, the large ftest->k might cause
+ loops. Compare this with conditional
+ jumps below, where offsets are limited. --ANK (981016)
+ */
+ if (ftest->k >= (unsigned)(flen-pc-1))
return (-EINVAL);
}
else
@@ -302,17 +388,18 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
* Check that memory operations use valid addresses.
*/
- if(ftest->k <0 || ftest->k >= BPF_MEMWORDS)
+ if (ftest->k >= BPF_MEMWORDS)
{
/*
* But it might not be a memory operation...
*/
-
- if (BPF_CLASS(ftest->code) == BPF_ST)
+ switch (ftest->code) {
+ case BPF_ST:
+ case BPF_STX:
+ case BPF_LD|BPF_MEM:
+ case BPF_LDX|BPF_MEM:
return -EINVAL;
- if((BPF_CLASS(ftest->code) == BPF_LD) &&
- (BPF_MODE(ftest->code) == BPF_MEM))
- return (-EINVAL);
+ }
}
}
@@ -332,35 +419,36 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
- struct sock_filter *fp, *old_filter;
- int fsize = sizeof(struct sock_filter) * fprog->len;
+ struct sk_filter *fp;
+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
int err;
/* Make sure new filter is there and in the right amounts. */
- if(fprog->filter == NULL || fprog->len == 0 || fsize > BPF_MAXINSNS)
+ if (fprog->filter == NULL || fprog->len > BPF_MAXINSNS)
return (-EINVAL);
- if((err = sk_chk_filter(fprog->filter, fprog->len))==0)
- {
- /* If existing filter, remove it first */
- if(sk->filter)
- {
- old_filter = sk->filter_data;
- kfree_s(old_filter, (sizeof(old_filter) * sk->filter));
- sk->filter_data = NULL;
- }
+ fp = (struct sk_filter *)sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+ if(fp == NULL)
+ return (-ENOMEM);
- fp = (struct sock_filter *)kmalloc(fsize, GFP_KERNEL);
- if(fp == NULL)
- return (-ENOMEM);
+ if (copy_from_user(fp->insns, fprog->filter, fsize)) {
+ sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+ return -EFAULT;
+ }
- memset(fp,0,sizeof(*fp));
- memcpy(fp, fprog->filter, fsize); /* Copy instructions */
+ atomic_set(&fp->refcnt, 1);
+ fp->len = fprog->len;
- sk->filter = fprog->len; /* Number of filter blocks */
- sk->filter_data = fp; /* Filter instructions */
+ if ((err = sk_chk_filter(fp->insns, fp->len))==0) {
+ struct sk_filter *old_fp = sk->filter;
+ sk->filter = fp;
+ synchronize_bh();
+ fp = old_fp;
}
+ if (fp)
+ sk_filter_release(sk, fp);
+
return (err);
}
#endif /* CONFIG_FILTER */
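
sk_run_filter() now accepts "ancillary" loads at negative offsets starting at SKF_AD_OFF, exposing metadata (protocol, packet type, ifindex) that cannot be parsed out of the packet bytes themselves. A hedged user-space sketch of a filter using one of them, assuming the SO_ATTACH_FILTER socket option and the SKF_AD_* constants from linux/filter.h; the accept length is illustrative:

    #include <sys/socket.h>
    #include <linux/filter.h>
    #include <linux/if_ether.h>

    /* Accept only ARP frames by testing the ancillary protocol word. */
    static struct sock_filter insns[] = {
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, 0xffff),      /* matched: accept        */
            BPF_STMT(BPF_RET | BPF_K, 0),           /* anything else: drop    */
    };

    static struct sock_fprog prog = { sizeof(insns) / sizeof(insns[0]), insns };

    /* attach with: setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)); */
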
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 8919fc5c1..c20f85303 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -59,8 +59,15 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode)
goto out;
m->msg_iov=iov;
- for (err = 0, ct = 0; ct < m->msg_iovlen; ct++)
+ for (err = 0, ct = 0; ct < m->msg_iovlen; ct++) {
err += iov[ct].iov_len;
+ /* Goal is not to verify user data, but to prevent returning
+ negative value, which is interpreted as errno.
+ Overflow is still possible, but it is harmless.
+ */
+ if (err < 0)
+ return -EMSGSIZE;
+ }
out:
return err;
}
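
The verify_iovec() change above guards the summed iovec lengths against going negative, since callers interpret a negative total as an errno. The same guard in isolation, as a sketch with a hypothetical helper name:

    #include <linux/uio.h>                      /* struct iovec */
    #include <linux/errno.h>

    /* Sum user-supplied iov_len values without letting the signed total wrap:
     * a wrapped (negative) sum would later be mistaken for a -errno return.
     */
    static int total_iov_len(const struct iovec *iov, unsigned long n)
    {
            int total = 0;
            unsigned long i;

            for (i = 0; i < n; i++) {
                    total += iov[i].iov_len;
                    if (total < 0)
                            return -EMSGSIZE;
            }
            return total;
    }
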
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 637322f65..b96650bcd 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -25,8 +25,36 @@
#endif
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/sock.h>
#include <linux/rtnetlink.h>
+/*
+ NOTE. The most unpleasent question is serialization of
+ accesses to resolved addresses. The problem is that addresses
+ are modified by bh, but they are referenced from normal
+ kernel thread. Before today no locking was made.
+ My reasoning was that corrupted address token will be copied
+ to packet with cosmologically small probability
+ (it is even difficult to estimate such small number)
+ and it is very silly to waste cycles in fast path to lock them.
+
+ But now I changed my mind, but not because previous statement
+ is wrong. Actually, neigh->ha MAY BE not opaque byte array,
+ but reference to some private data. In this case even neglibible
+ corruption probability becomes bug.
+
+ - hh cache is protected by rwlock. It assumes that
+ hh cache update procedure is short and fast, and that
+ read_lock is cheaper than start_bh_atomic().
+ - ha tokens, saved in neighbour entries, are protected
+ by bh_atomic().
+ - no protection is made in /proc reading. It is OK, because
+ /proc is broken by design in any case, and
+ corrupted output is normal behaviour there.
+
+ --ANK (981025)
+ */
+
#define NEIGH_DEBUG 1
#define NEIGH_PRINTK(x...) printk(x)
@@ -48,6 +76,7 @@ static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
+static int pneigh_ifdown(struct neigh_table *tbl, struct device *dev);
static int neigh_glbl_allocs;
static struct neigh_table *neigh_tables;
@@ -83,8 +112,20 @@ static int neigh_forced_gc(struct neigh_table *tbl)
np = &tbl->hash_buckets[i];
while ((n = *np) != NULL) {
+ /* Neighbour record may be discarded if:
+ - nobody refers to it.
+ - it is not premanent
+ - (NEW and probably wrong)
+ INCOMPLETE entries are kept at least for
+ n->parms->retrans_time, otherwise we could
+ flood network with resolution requests.
+ It is not clear, what is better table overflow
+ or flooding.
+ */
if (atomic_read(&n->refcnt) == 0 &&
- !(n->nud_state&NUD_PERMANENT)) {
+ !(n->nud_state&NUD_PERMANENT) &&
+ (n->nud_state != NUD_INCOMPLETE ||
+ jiffies - n->used > n->parms->retrans_time)) {
*np = n->next;
n->tbl = NULL;
tbl->entries--;
@@ -149,6 +190,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct device *dev)
del_timer(&tbl->proxy_timer);
skb_queue_purge(&tbl->proxy_queue);
+ pneigh_ifdown(tbl, dev);
end_bh_atomic();
return 0;
}
@@ -296,6 +338,7 @@ int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct device *dev)
for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) {
if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) {
*np = n->next;
+ synchronize_bh();
if (tbl->pdestructor)
tbl->pdestructor(n);
kfree(n);
@@ -305,6 +348,29 @@ int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct device *dev)
return -ENOENT;
}
+static int pneigh_ifdown(struct neigh_table *tbl, struct device *dev)
+{
+ struct pneigh_entry *n, **np;
+ u32 h;
+
+ for (h=0; h<=PNEIGH_HASHMASK; h++) {
+ np = &tbl->phash_buckets[h];
+ for (np = &tbl->phash_buckets[h]; (n=*np) != NULL; np = &n->next) {
+ if (n->dev == dev || dev == NULL) {
+ *np = n->next;
+ synchronize_bh();
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ kfree(n);
+ continue;
+ }
+ np = &n->next;
+ }
+ }
+ return -ENOENT;
+}
+
+
/*
* neighbour must already be out of the table;
*
@@ -516,11 +582,11 @@ static void neigh_timer_handler(unsigned long arg)
return;
}
- neigh->probes++;
neigh->timer.expires = now + neigh->parms->retrans_time;
add_timer(&neigh->timer);
neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
+ neigh->probes++;
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
@@ -542,6 +608,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
add_timer(&neigh->timer);
neigh->ops->solicit(neigh, skb);
+ neigh->probes++;
} else {
neigh->nud_state = NUD_FAILED;
if (skb)
@@ -581,8 +648,11 @@ static __inline__ void neigh_update_hhs(struct neighbour *neigh)
neigh->dev->header_cache_update;
if (update) {
- for (hh=neigh->hh; hh; hh=hh->hh_next)
+ for (hh=neigh->hh; hh; hh=hh->hh_next) {
+ write_lock_irq(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
+ write_unlock_irq(&hh->hh_lock);
+ }
}
}
@@ -655,7 +725,7 @@ int neigh_update(struct neighbour *neigh, u8 *lladdr, u8 new, int override, int
del_timer(&neigh->timer);
neigh->nud_state = new;
if (lladdr != neigh->ha) {
- memcpy(neigh->ha, lladdr, dev->addr_len);
+ memcpy(&neigh->ha, lladdr, dev->addr_len);
neigh_update_hhs(neigh);
neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
@@ -764,14 +834,20 @@ int neigh_resolve_output(struct sk_buff *skb)
__skb_pull(skb, skb->nh.raw - skb->data);
if (neigh_event_send(neigh, skb) == 0) {
+ int err;
struct device *dev = neigh->dev;
- if (dev->hard_header_cache) {
+ if (dev->hard_header_cache && dst->hh == NULL) {
start_bh_atomic();
if (dst->hh == NULL)
neigh_hh_init(neigh, dst, dst->ops->protocol);
+ err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
+ end_bh_atomic();
+ } else {
+ start_bh_atomic();
+ err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
end_bh_atomic();
}
- if (dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len) >= 0)
+ if (err >= 0)
return neigh->ops->queue_xmit(skb);
kfree_skb(skb);
return -EINVAL;
@@ -788,13 +864,17 @@ discard:
int neigh_connected_output(struct sk_buff *skb)
{
+ int err;
struct dst_entry *dst = skb->dst;
struct neighbour *neigh = dst->neighbour;
struct device *dev = neigh->dev;
__skb_pull(skb, skb->nh.raw - skb->data);
- if (dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len) >= 0)
+ start_bh_atomic();
+ err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
+ end_bh_atomic();
+ if (err >= 0)
return neigh->ops->queue_xmit(skb);
kfree_skb(skb);
return -EINVAL;
@@ -868,7 +948,6 @@ struct neigh_parms *neigh_parms_alloc(struct device *dev, struct neigh_table *tb
}
}
p->next = tbl->parms.next;
- /* ATOMIC_SET */
tbl->parms.next = p;
}
return p;
@@ -882,8 +961,8 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
return;
for (p = &tbl->parms.next; *p; p = &(*p)->next) {
if (*p == parms) {
- /* ATOMIC_SET */
*p = parms->next;
+ synchronize_bh();
#ifdef CONFIG_SYSCTL
neigh_sysctl_unregister(parms);
#endif
@@ -926,14 +1005,14 @@ int neigh_table_clear(struct neigh_table *tbl)
del_timer(&tbl->gc_timer);
del_timer(&tbl->proxy_timer);
skb_queue_purge(&tbl->proxy_queue);
- if (tbl->entries)
- neigh_ifdown(tbl, NULL);
+ neigh_ifdown(tbl, NULL);
end_bh_atomic();
if (tbl->entries)
printk(KERN_CRIT "neighbour leakage\n");
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
if (*tp == tbl) {
*tp = tbl->next;
+ synchronize_bh();
break;
}
}
@@ -976,7 +1055,7 @@ int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return -EINVAL;
start_bh_atomic();
- n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
+ n = __neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 0);
if (n) {
err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
neigh_release(n);
@@ -1020,7 +1099,7 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
return -EINVAL;
start_bh_atomic();
- n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
+ n = __neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 0);
if (n) {
if (nlh->nlmsg_flags&NLM_F_EXCL)
err = -EEXIST;
@@ -1091,7 +1170,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct
for (h=0; h <= NEIGH_HASHMASK; h++) {
if (h < s_h) continue;
if (h > s_h)
- memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(int));
+ s_idx = 0;
start_bh_atomic();
for (n = tbl->hash_buckets[h], idx = 0; n;
n = n->next, idx++) {
@@ -1100,12 +1179,14 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
end_bh_atomic();
- goto done;
+ cb->args[1] = h;
+ cb->args[2] = idx;
+ return -1;
}
}
end_bh_atomic();
}
-done:
+
cb->args[1] = h;
cb->args[2] = idx;
return skb->len;
@@ -1125,7 +1206,7 @@ int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
if (family && tbl->family != family)
continue;
if (t > s_t)
- memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(int));
+ memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
if (neigh_dump_table(tbl, skb, cb) < 0)
break;
}
@@ -1276,10 +1357,10 @@ int neigh_sysctl_register(struct device *dev, struct neigh_parms *p,
t->neigh_dev[0].ctl_name = dev->ifindex;
memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
} else {
- t->neigh_vars[12].data = (&p->locktime) + 1;
- t->neigh_vars[13].data = (&p->locktime) + 2;
- t->neigh_vars[14].data = (&p->locktime) + 3;
- t->neigh_vars[15].data = (&p->locktime) + 4;
+ t->neigh_vars[12].data = (int*)(p+1);
+ t->neigh_vars[13].data = (int*)(p+1) + 1;
+ t->neigh_vars[14].data = (int*)(p+1) + 2;
+ t->neigh_vars[15].data = (int*)(p+1) + 3;
}
t->neigh_neigh_dir[0].ctl_name = pdev_id;
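
The neighbour.c comment block above spells out the new locking: cached hardware headers (hh) are protected by a per-entry rwlock, on the theory that updates are rare and short while the transmit fast path only pays for a read lock. A hedged sketch of the reader/writer split; the structure layout and function names are hypothetical:

    #include <asm/spinlock.h>                   /* rwlock_t, RW_LOCK_UNLOCKED on 2.2 */
    #include <linux/string.h>

    struct hh_sketch {
            rwlock_t        lock;               /* initialised elsewhere with RW_LOCK_UNLOCKED */
            unsigned char   data[16];
    };

    static void hh_update(struct hh_sketch *hh, const unsigned char *lladdr, int len)
    {
            write_lock_irq(&hh->lock);          /* rare, short critical section */
            memcpy(hh->data, lladdr, len);
            write_unlock_irq(&hh->lock);
    }

    static void hh_copy_out(struct hh_sketch *hh, unsigned char *dst, int len)
    {
            read_lock(&hh->lock);               /* cheap enough for the transmit fast path */
            memcpy(dst, hh->data, len);
            read_unlock(&hh->lock);
    }
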
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e1fe88701..ed27c8e1d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -41,7 +41,6 @@
#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
@@ -49,6 +48,7 @@
#include <net/tcp.h>
#include <net/udp.h>
#include <net/sock.h>
+#include <net/pkt_sched.h>
atomic_t rtnl_rlockct;
struct wait_queue *rtnl_wait;
diff --git a/net/core/scm.c b/net/core/scm.c
index c28da7ebb..cdb5f3d03 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -27,7 +27,6 @@
#include <asm/uaccess.h>
#include <linux/inet.h>
-#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/rarp.h>
@@ -123,7 +122,15 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
err = -EINVAL;
/* Verify that cmsg_len is at least sizeof(struct cmsghdr) */
- if ((unsigned long)(((char*)cmsg - (char*)msg->msg_control)
+ /* The first check was omitted in <= 2.2.5. The reasoning was
+ that parser checks cmsg_len in any case, so that
+ additional check would be work duplication.
+ But if cmsg_level is not SOL_SOCKET, we do not check
+ for too short ancillary data object at all! Oops.
+ OK, let's add it...
+ */
+ if (cmsg->cmsg_len < sizeof(struct cmsghdr) ||
+ (unsigned long)(((char*)cmsg - (char*)msg->msg_control)
+ cmsg->cmsg_len) > msg->msg_controllen)
goto error;
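
__scm_send() above now rejects a control message whose cmsg_len is smaller than sizeof(struct cmsghdr) before anything else, closing the hole described in the comment for non-SOL_SOCKET levels. The equivalent check when walking a control buffer by hand, as a user-space style sketch with a hypothetical helper:

    #include <sys/socket.h>

    /* Reject any cmsg whose claimed length is shorter than its own header
     * or runs past the end of msg_control.
     */
    static int cmsgs_sane(struct msghdr *msg)
    {
            struct cmsghdr *c;

            for (c = CMSG_FIRSTHDR(msg); c != NULL; c = CMSG_NXTHDR(msg, c)) {
                    if (c->cmsg_len < sizeof(struct cmsghdr))
                            return -1;
                    if ((char *)c + c->cmsg_len >
                        (char *)msg->msg_control + msg->msg_controllen)
                            return -1;
            }
            return 0;
    }
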
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a03d284e7..b76364371 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4,7 +4,7 @@
* Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
- * Version: $Id: skbuff.c,v 1.54 1998/09/15 02:11:09 davem Exp $
+ * Version: $Id: skbuff.c,v 1.55 1999/02/23 08:12:27 davem Exp $
*
* Fixes:
* Alan Cox : Fixed the worst of the load balancer bugs.
@@ -304,6 +304,9 @@ struct sk_buff *skb_copy(struct sk_buff *skb, int gfp_mask)
n->stamp=skb->stamp;
n->destructor = NULL;
n->security=skb->security;
+#ifdef CONFIG_IP_FIREWALL
+ n->fwmark = skb->fwmark;
+#endif
return n;
}
@@ -350,6 +353,9 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, int newheadroom)
n->stamp=skb->stamp;
n->destructor = NULL;
n->security=skb->security;
+#ifdef CONFIG_IP_FIREWALL
+ n->fwmark = skb->fwmark;
+#endif
return n;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index caaaa21e6..e0eb41a01 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -7,7 +7,7 @@
* handler for protocols to use and generic option handler.
*
*
- * Version: $Id: sock.c,v 1.75 1998/11/07 10:54:38 davem Exp $
+ * Version: $Id: sock.c,v 1.80 1999/05/08 03:04:34 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -150,15 +150,14 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
char *optval, int optlen)
{
struct sock *sk=sock->sk;
+#ifdef CONFIG_FILTER
+ struct sk_filter *filter;
+#endif
int val;
int valbool;
int err;
struct linger ling;
int ret = 0;
-
-#ifdef CONFIG_FILTER
- struct sock_fprog fprog;
-#endif
/*
* Options without arguments
@@ -255,13 +254,12 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_PRIORITY:
- if (val >= 0 && val <= 7)
+ if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
sk->priority = val;
else
- return(-EINVAL);
+ return(-EPERM);
break;
-
case SO_LINGER:
if(optlen<sizeof(ling))
return -EINVAL; /* 1003.1g */
@@ -310,10 +308,12 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
if (optlen > IFNAMSIZ)
optlen = IFNAMSIZ;
if (copy_from_user(devname, optval, optlen))
- return -EFAULT;
-
+ return -EFAULT;
+
/* Remove any cached route for this socket. */
+ lock_sock(sk);
dst_release(xchg(&sk->dst_cache, NULL));
+ release_sock(sk);
if (devname[0] == '\0') {
sk->bound_dev_if = 0;
@@ -331,30 +331,27 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
#ifdef CONFIG_FILTER
case SO_ATTACH_FILTER:
- if(optlen < sizeof(struct sock_fprog))
- return -EINVAL;
+ ret = -EINVAL;
+ if (optlen == sizeof(struct sock_fprog)) {
+ struct sock_fprog fprog;
- if(copy_from_user(&fprog, optval, sizeof(fprog)))
- {
ret = -EFAULT;
- break;
- }
+ if (copy_from_user(&fprog, optval, sizeof(fprog)))
+ break;
- ret = sk_attach_filter(&fprog, sk);
+ ret = sk_attach_filter(&fprog, sk);
+ }
break;
case SO_DETACH_FILTER:
- if(sk->filter)
- {
- fprog.filter = sk->filter_data;
- kfree_s(fprog.filter, (sizeof(fprog.filter) * sk->filter));
- sk->filter_data = NULL;
- sk->filter = 0;
+ filter = sk->filter;
+ if(filter) {
+ sk->filter = NULL;
+ synchronize_bh();
+ sk_filter_release(sk, filter);
return 0;
}
- else
- return -EINVAL;
- break;
+ return -ENOENT;
#endif
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
@@ -501,9 +498,23 @@ struct sock *sk_alloc(int family, int priority, int zero_it)
void sk_free(struct sock *sk)
{
+#ifdef CONFIG_FILTER
+ struct sk_filter *filter;
+#endif
if (sk->destruct)
sk->destruct(sk);
+#ifdef CONFIG_FILTER
+ filter = sk->filter;
+ if (filter) {
+ sk_filter_release(sk, filter);
+ sk->filter = NULL;
+ }
+#endif
+
+ if (atomic_read(&sk->omem_alloc))
+ printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));
+
kmem_cache_free(sk_cachep, sk);
}
@@ -933,7 +944,7 @@ int sock_no_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
*/
if (current->pgrp != -arg &&
current->pid != arg &&
- !capable(CAP_NET_ADMIN)) return(-EPERM);
+ !capable(CAP_KILL)) return(-EPERM);
sk->proc = arg;
return(0);
case F_GETOWN:
diff --git a/net/econet/econet.c b/net/econet/econet.c
index 8a3a72ae7..8930109b3 100644
--- a/net/econet/econet.c
+++ b/net/econet/econet.c
@@ -44,7 +44,6 @@
#include <net/udp.h>
#include <net/ip.h>
#include <asm/spinlock.h>
-#include <linux/inetdevice.h>
static struct proto_ops econet_ops;
static struct sock *econet_sklist;
diff --git a/net/ipv4/Config.in b/net/ipv4/Config.in
index 8e4b3945e..29786da5e 100644
--- a/net/ipv4/Config.in
+++ b/net/ipv4/Config.in
@@ -32,6 +32,9 @@ if [ "$CONFIG_FIREWALL" = "y" ]; then
fi
fi
bool 'IP: always defragment (required for masquerading)' CONFIG_IP_ALWAYS_DEFRAG
+ if [ "$CONFIG_IP_MULTIPLE_TABLES" = "y" ]; then
+ bool 'IP: use FWMARK value as routing key' CONFIG_IP_ROUTE_FWMARK
+ fi
fi
fi
if [ "$CONFIG_IP_FIREWALL" = "y" ]; then
@@ -78,7 +81,6 @@ comment '(it is safe to leave these untouched)'
tristate 'IP: Reverse ARP' CONFIG_INET_RARP
#bool 'IP: Path MTU Discovery (normally enabled)' CONFIG_PATH_MTU_DISCOVERY
#bool 'IP: Disable NAGLE algorithm (normally enabled)' CONFIG_TCP_NAGLE_OFF
-bool 'IP: Drop source routed frames' CONFIG_IP_NOSR
bool 'IP: Allow large windows (not recommended if <16Mb of memory)' CONFIG_SKB_LARGE
#if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
#bool 'IP: support experimental checksum copy to user for UDP' CONFIG_UDP_DELAY_CSUM
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3520b0c52..70fcf4024 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -5,7 +5,7 @@
*
* PF_INET protocol family socket handler.
*
- * Version: $Id: af_inet.c,v 1.82 1999/01/04 20:36:44 davem Exp $
+ * Version: $Id: af_inet.c,v 1.87 1999/04/22 10:07:33 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -53,6 +53,7 @@
* David S. Miller : New socket lookup architecture.
* Some other random speedups.
* Cyrus Durgin : Cleaned up file for kmod hacks.
+ * Andi Kleen : Fix inet_stream_connect TCP race.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -175,8 +176,6 @@ static __inline__ void kill_sk_now(struct sock *sk)
if(sk->opt)
kfree(sk->opt);
dst_release(sk->dst_cache);
- if (atomic_read(&sk->omem_alloc))
- printk(KERN_DEBUG "kill_sk_now: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));
sk_free(sk);
}
@@ -514,17 +513,6 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
(sk->num != 0))
return -EINVAL;
- snum = ntohs(addr->sin_port);
-#ifdef CONFIG_IP_MASQUERADE
- /* The kernel masquerader needs some ports. */
- if((snum >= PORT_MASQ_BEGIN) && (snum <= PORT_MASQ_END))
- return -EADDRINUSE;
-#endif
- if (snum == 0)
- snum = sk->prot->good_socknum();
- if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
- return(-EACCES);
-
chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
if (addr->sin_addr.s_addr != 0 && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) {
@@ -546,6 +534,17 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if(chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
sk->saddr = 0; /* Use device */
+ snum = ntohs(addr->sin_port);
+#ifdef CONFIG_IP_MASQUERADE
+ /* The kernel masquerader needs some ports. */
+ if((snum >= PORT_MASQ_BEGIN) && (snum <= PORT_MASQ_END))
+ return -EADDRINUSE;
+#endif
+ if (snum == 0)
+ snum = sk->prot->good_socknum();
+ if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+ return(-EACCES);
+
/* Make sure we are allowed to bind here. */
if(sk->prot->verify_bind(sk, snum))
return -EADDRINUSE;
@@ -613,15 +612,16 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
}
if(sock->state == SS_CONNECTING) {
+ /* Note: tcp_connected contains SYN_RECV, which may cause
+ bogus results here. -AK */
if(tcp_connected(sk->state)) {
sock->state = SS_CONNECTED;
return 0;
}
- if(sk->protocol == IPPROTO_TCP && (flags & O_NONBLOCK)) {
- if(sk->err)
- return sock_error(sk);
+ if (sk->zapped || sk->err)
+ goto sock_error;
+ if (flags & O_NONBLOCK)
return -EALREADY;
- }
} else {
/* We may need to bind the socket. */
if (inet_autobind(sk) != 0)
@@ -629,15 +629,17 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
if (sk->prot->connect == NULL)
return(-EOPNOTSUPP);
err = sk->prot->connect(sk, uaddr, addr_len);
+ /* Note: there is a theoretical race here when an wake up
+ occurred before inet_wait_for_connect is entered. In 2.3
+ the wait queue setup should be moved before the low level
+ connect call. -AK*/
if (err < 0)
return(err);
sock->state = SS_CONNECTING;
}
- if (sk->state > TCP_FIN_WAIT2 && sock->state == SS_CONNECTING) {
- sock->state = SS_UNCONNECTED;
- return sock_error(sk);
- }
+ if (sk->state > TCP_FIN_WAIT2 && sock->state == SS_CONNECTING)
+ goto sock_error;
if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return (-EINPROGRESS);
@@ -649,17 +651,20 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
}
sock->state = SS_CONNECTED;
- if ((sk->state != TCP_ESTABLISHED) && sk->err) {
- /* This is ugly but needed to fix a race in the ICMP error handler */
- if (sk->protocol == IPPROTO_TCP && sk->zapped) {
- lock_sock(sk);
- tcp_set_state(sk, TCP_CLOSE);
- release_sock(sk);
- }
- sock->state = SS_UNCONNECTED;
- return sock_error(sk);
- }
+ if ((sk->state != TCP_ESTABLISHED) && sk->err)
+ goto sock_error;
return(0);
+
+sock_error:
+ /* This is ugly but needed to fix a race in the ICMP error handler */
+ if (sk->zapped && sk->state != TCP_CLOSE) {
+ lock_sock(sk);
+ tcp_set_state(sk, TCP_CLOSE);
+ release_sock(sk);
+ sk->zapped = 0;
+ }
+ sock->state = SS_UNCONNECTED;
+ return sock_error(sk);
}
/*
@@ -828,6 +833,8 @@ int inet_shutdown(struct socket *sock, int how)
sk->shutdown |= how;
if (sk->prot->shutdown)
sk->prot->shutdown(sk, how);
+ /* Wake up anyone sleeping in poll. */
+ sk->state_change(sk);
return(0);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index efb16cc47..2c311f233 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1,6 +1,6 @@
/* linux/net/inet/arp.c
*
- * Version: $Id: arp.c,v 1.75 1998/11/16 04:51:56 davem Exp $
+ * Version: $Id: arp.c,v 1.77 1999/03/21 05:22:30 davem Exp $
*
* Copyright (C) 1994 by Florian La Roche
*
@@ -294,7 +294,7 @@ static int arp_constructor(struct neighbour *neigh)
static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ dst_link_failure(skb);
kfree_skb(skb);
}
@@ -401,8 +401,12 @@ int arp_bind_neighbour(struct dst_entry *dst)
if (dev == NULL)
return 0;
- if (dst->neighbour == NULL)
- dst->neighbour = __neigh_lookup(&arp_tbl, &((struct rtable*)dst)->rt_gateway, dev, 1);
+ if (dst->neighbour == NULL) {
+ u32 nexthop = ((struct rtable*)dst)->rt_gateway;
+ if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT))
+ nexthop = 0;
+ dst->neighbour = __neigh_lookup(&arp_tbl, &nexthop, dev, 1);
+ }
return (dst->neighbour != NULL);
}
@@ -557,6 +561,19 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
if (htons(dev_type) != arp->ar_hrd)
goto out;
break;
+#ifdef CONFIG_NET_ETHERNET
+ case ARPHRD_ETHER:
+ /*
+ * ETHERNET devices will accept ARP hardware types of either
+ * 1 (Ethernet) or 6 (IEEE 802.2).
+ */
+ if (arp->ar_hrd != __constant_htons(ARPHRD_ETHER) &&
+ arp->ar_hrd != __constant_htons(ARPHRD_IEEE802))
+ goto out;
+ if (arp->ar_pro != __constant_htons(ETH_P_IP))
+ goto out;
+ break;
+#endif
#ifdef CONFIG_FDDI
case ARPHRD_FDDI:
/*
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index b1aa1a04e..c8b0fbbc8 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1,7 +1,7 @@
/*
* NET3 IP device support routines.
*
- * Version: $Id: devinet.c,v 1.25 1999/01/04 20:14:33 davem Exp $
+ * Version: $Id: devinet.c,v 1.28 1999/05/08 20:00:16 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -139,6 +139,7 @@ static void inetdev_destroy(struct in_device *in_dev)
devinet_sysctl_unregister(&in_dev->cnf);
#endif
in_dev->dev->ip_ptr = NULL;
+ synchronize_bh();
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
kfree(in_dev);
}
@@ -173,6 +174,8 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy)
continue;
}
*ifap1 = ifa->ifa_next;
+ synchronize_bh();
+
rtmsg_ifa(RTM_DELADDR, ifa);
notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
inet_free_ifa(ifa);
@@ -182,7 +185,7 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy)
/* 2. Unlink it */
*ifap = ifa1->ifa_next;
-
+ synchronize_bh();
/* 3. Announce address deletion */
@@ -238,7 +241,7 @@ inet_insert_ifa(struct in_device *in_dev, struct in_ifaddr *ifa)
}
ifa->ifa_next = *ifap;
- /* ATOMIC_SET */
+ wmb();
*ifap = ifa;
/* Send message first, then call notifier.
@@ -650,8 +653,25 @@ u32 inet_select_addr(struct device *dev, u32 dst, int scope)
if (!dst || inet_ifa_match(dst, ifa))
return addr;
} endfor_ifa(in_dev);
+
+ if (addr || scope >= RT_SCOPE_LINK)
+ return addr;
+
+	/* Non-loopback addresses configured on loopback should be preferred
+	   in this case. It is important that lo is the first interface
+	   in the dev_base list.
+ */
+ for (dev=dev_base; dev; dev=dev->next) {
+ if ((in_dev=dev->ip_ptr) == NULL)
+ continue;
+
+ for_primary_ifa(in_dev) {
+ if (ifa->ifa_scope <= scope)
+ return ifa->ifa_local;
+ } endfor_ifa(in_dev);
+ }
- return addr;
+ return 0;
}
/*
@@ -692,6 +712,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void
ifa->ifa_mask = inet_make_mask(8);
ifa->ifa_dev = in_dev;
ifa->ifa_scope = RT_SCOPE_HOST;
+ memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
inet_insert_ifa(in_dev, ifa);
}
}
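
The inet_select_addr() fallback added above walks every device, starting with lo, and returns the first primary address whose scope is at least as wide as the one requested. A toy model of that selection over a flat list (the interface names and scopes are invented for illustration):

#include <stdio.h>

/* Scope values follow the kernel's RT_SCOPE_* ordering: a smaller number
 * means a wider scope (UNIVERSE=0 ... LINK=253, HOST=254). */
struct toy_ifa {
	const char	*label;
	int		scope;
};

/* First address whose scope is no narrower than the request, mirroring
 * the "ifa->ifa_scope <= scope" test in the fallback loop. */
static const struct toy_ifa *select_addr(const struct toy_ifa *tab, int n, int scope)
{
	int i;

	for (i = 0; i < n; i++)
		if (tab[i].scope <= scope)
			return &tab[i];
	return NULL;
}

int main(void)
{
	/* Hypothetical interface list; lo comes first, as the comment requires. */
	struct toy_ifa tab[] = {
		{ "lo",   254 },	/* 127.0.0.1, host scope */
		{ "eth0",   0 },	/* universe-scope address */
		{ "eth1",   0 },
	};
	const struct toy_ifa *ifa = select_addr(tab, 3, 0 /* RT_SCOPE_UNIVERSE */);

	printf("selected: %s\n", ifa ? ifa->label : "(none)");
	return 0;
}

Here 127.0.0.1's host scope disqualifies it and eth0's universe-scope address wins; in the kernel the same walk starts with lo, so a global address configured on the loopback device would be preferred over the physical interfaces.
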
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a3585cc0c..a17470483 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -5,7 +5,7 @@
*
* IPv4 Forwarding Information Base: FIB frontend.
*
- * Version: $Id: fib_frontend.c,v 1.14 1999/01/04 20:13:55 davem Exp $
+ * Version: $Id: fib_frontend.c,v 1.15 1999/03/21 05:22:31 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -189,7 +189,7 @@ unsigned inet_addr_type(u32 addr)
*/
int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
- struct device *dev, u32 *spec_dst)
+ struct device *dev, u32 *spec_dst, u32 *itag)
{
struct in_device *in_dev = dev->ip_ptr;
struct rt_key key;
@@ -209,6 +209,8 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
if (res.type != RTN_UNICAST)
return -EINVAL;
*spec_dst = FIB_RES_PREFSRC(res);
+ if (itag)
+ fib_combine_itag(itag, &res);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
#else
@@ -231,6 +233,7 @@ last_resort:
if (IN_DEV_RPFILTER(in_dev))
return -EINVAL;
*spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
+ *itag = 0;
return 0;
}
@@ -354,7 +357,7 @@ int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
for (t=s_t; t<=RT_TABLE_MAX; t++) {
if (t < s_t) continue;
if (t > s_t)
- memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(int));
+ memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
if ((tb = fib_get_table(t))==NULL)
continue;
if (tb->tb_dump(tb, skb, cb) < 0)
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 5232c618c..d9e029cef 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -5,7 +5,7 @@
*
* IPv4 FIB: lookup engine and maintenance routines.
*
- * Version: $Id: fib_hash.c,v 1.6 1998/10/03 09:37:06 davem Exp $
+ * Version: $Id: fib_hash.c,v 1.8 1999/03/25 10:04:17 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -302,6 +302,90 @@ fn_hash_lookup(struct fib_table *tb, const struct rt_key *key, struct fib_result
return 1;
}
+static int fn_hash_last_dflt=-1;
+
+static int fib_detect_death(struct fib_info *fi, int order,
+ struct fib_info **last_resort, int *last_idx)
+{
+ struct neighbour *n;
+ int state = NUD_NONE;
+
+ n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
+ if (n) {
+ state = n->nud_state;
+ neigh_release(n);
+ }
+ if (state==NUD_REACHABLE)
+ return 0;
+ if ((state&NUD_VALID) && order != fn_hash_last_dflt)
+ return 0;
+ if ((state&NUD_VALID) ||
+ (*last_idx<0 && order > fn_hash_last_dflt)) {
+ *last_resort = fi;
+ *last_idx = order;
+ }
+ return 1;
+}
+
+static void
+fn_hash_select_default(struct fib_table *tb, const struct rt_key *key, struct fib_result *res)
+{
+ int order, last_idx;
+ struct fib_node *f;
+ struct fib_info *fi = NULL;
+ struct fib_info *last_resort;
+ struct fn_hash *t = (struct fn_hash*)tb->tb_data;
+ struct fn_zone *fz = t->fn_zones[0];
+
+ if (fz == NULL)
+ return;
+
+ last_idx = -1;
+ last_resort = NULL;
+ order = -1;
+
+ for (f = fz->fz_hash[0]; f; f = f->fn_next) {
+ struct fib_info *next_fi = FIB_INFO(f);
+
+ if ((f->fn_state&FN_S_ZOMBIE) ||
+ f->fn_scope != res->scope ||
+ f->fn_type != RTN_UNICAST)
+ continue;
+
+ if (next_fi->fib_priority > res->fi->fib_priority)
+ break;
+ if (!next_fi->fib_nh[0].nh_gw || next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+ continue;
+ f->fn_state |= FN_S_ACCESSED;
+
+ if (fi == NULL) {
+ if (next_fi != res->fi)
+ break;
+ } else if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
+ res->fi = fi;
+ fn_hash_last_dflt = order;
+ return;
+ }
+ fi = next_fi;
+ order++;
+ }
+
+ if (order<=0 || fi==NULL) {
+ fn_hash_last_dflt = -1;
+ return;
+ }
+
+ if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
+ res->fi = fi;
+ fn_hash_last_dflt = order;
+ return;
+ }
+
+ if (last_idx >= 0)
+ res->fi = last_resort;
+ fn_hash_last_dflt = last_idx;
+}
+
#define FIB_SCAN(f, fp) \
for ( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)
@@ -476,7 +560,6 @@ replace:
*/
new_f->fn_next = f;
- /* ATOMIC_SET */
*fp = new_f;
fz->fz_nent++;
@@ -484,6 +567,8 @@ replace:
f = *del_fp;
/* Unlink replaced node */
*del_fp = f->fn_next;
+ synchronize_bh();
+
if (!(f->fn_state&FN_S_ZOMBIE))
rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
if (f->fn_state&FN_S_ACCESSED)
@@ -571,6 +656,8 @@ FTprint("tb(%d)_delete: %d %08x/%d %d\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
if (matched != 1) {
*del_fp = f->fn_next;
+ synchronize_bh();
+
if (f->fn_state&FN_S_ACCESSED)
rt_cache_flush(-1);
fn_free_node(f);
@@ -601,6 +688,8 @@ fn_flush_list(struct fib_node ** fp, int z, struct fn_hash *table)
if (fi && ((f->fn_state&FN_S_ZOMBIE) || (fi->fib_flags&RTNH_F_DEAD))) {
*fp = f->fn_next;
+ synchronize_bh();
+
fn_free_node(f);
found++;
continue;
@@ -710,7 +799,7 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
for (h=0; h < fz->fz_divisor; h++) {
if (h < s_h) continue;
if (h > s_h)
- memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(int));
+ memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));
if (fz->fz_hash == NULL || fz->fz_hash[h] == NULL)
continue;
if (fn_hash_dump_bucket(skb, cb, tb, fz, fz->fz_hash[h]) < 0) {
@@ -732,7 +821,7 @@ static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlin
for (fz = table->fn_zone_list, m=0; fz; fz = fz->fz_next, m++) {
if (m < s_m) continue;
if (m > s_m)
- memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(int));
+ memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(cb->args[0]));
if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
cb->args[1] = m;
return -1;
@@ -784,6 +873,7 @@ __initfunc(struct fib_table * fib_hash_init(int id))
tb->tb_insert = fn_hash_insert;
tb->tb_delete = fn_hash_delete;
tb->tb_flush = fn_hash_flush;
+ tb->tb_select_default = fn_hash_select_default;
#ifdef CONFIG_RTNETLINK
tb->tb_dump = fn_hash_dump;
#endif
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 70fa5d843..868c44c31 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -5,7 +5,7 @@
*
* IPv4 Forwarding Information Base: policy rules.
*
- * Version: $Id: fib_rules.c,v 1.7 1998/10/03 09:37:09 davem Exp $
+ * Version: $Id: fib_rules.c,v 1.9 1999/03/25 10:04:23 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -16,6 +16,7 @@
*
* Fixes:
* Rani Assaf : local_rule cannot be deleted
+ * Marc Boucher : routing by fwmark
*/
#include <linux/config.h>
@@ -63,6 +64,9 @@ struct fib_rule
u32 r_srcmap;
u8 r_flags;
u8 r_tos;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ u32 r_fwmark;
+#endif
int r_ifindex;
#ifdef CONFIG_NET_CLS_ROUTE
__u32 r_tclassid;
@@ -88,13 +92,19 @@ int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
rtm->rtm_dst_len == r->r_dst_len &&
(!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 4) == 0) &&
rtm->rtm_tos == r->r_tos &&
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
+#endif
(!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
(!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
(!rta[RTA_IIF-1] || strcmp(RTA_DATA(rta[RTA_IIF-1]), r->r_ifname) == 0) &&
(!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
if (r == &local_rule)
return -EPERM;
+
*rp = r->r_next;
+ synchronize_bh();
+
if (r != &default_rule && r != &main_rule)
kfree(r);
return 0;
@@ -155,6 +165,10 @@ int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
new_r->r_srcmask = inet_make_mask(rtm->rtm_src_len);
new_r->r_dstmask = inet_make_mask(rtm->rtm_dst_len);
new_r->r_tos = rtm->rtm_tos;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ if (rta[RTA_PROTOINFO-1])
+ memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
+#endif
new_r->r_action = rtm->rtm_type;
new_r->r_flags = rtm->rtm_flags;
if (rta[RTA_PRIORITY-1])
@@ -267,14 +281,15 @@ FRprintk("Lookup: %08x <- %08x ", key->dst, key->src);
#ifdef CONFIG_IP_ROUTE_TOS
(r->r_tos && r->r_tos != key->tos) ||
#endif
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ (r->r_fwmark && r->r_fwmark != key->fwmark) ||
+#endif
(r->r_ifindex && r->r_ifindex != key->iif))
continue;
FRprintk("tb %d r %d ", r->r_table, r->r_action);
switch (r->r_action) {
case RTN_UNICAST:
- policy = NULL;
- break;
case RTN_NAT:
policy = r;
break;
@@ -295,14 +310,23 @@ FRprintk("ok\n");
res->r = policy;
return 0;
}
- if (err < 0)
+ if (err < 0 && err != -EAGAIN)
return err;
-FRprintk("RCONT ");
}
FRprintk("FAILURE\n");
return -ENETUNREACH;
}
+void fib_select_default(const struct rt_key *key, struct fib_result *res)
+{
+ if (res->r && res->r->r_action == RTN_UNICAST &&
+ FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) {
+ struct fib_table *tb;
+ if ((tb = fib_get_table(res->r->r_table)) != NULL)
+ tb->tb_select_default(tb, key, res);
+ }
+}
+
static int fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct device *dev = ptr;
@@ -337,6 +361,10 @@ extern __inline__ int inet_fill_rule(struct sk_buff *skb,
rtm->rtm_dst_len = r->r_dst_len;
rtm->rtm_src_len = r->r_src_len;
rtm->rtm_tos = r->r_tos;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ if (r->r_fwmark)
+ RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
+#endif
rtm->rtm_table = r->r_table;
rtm->rtm_protocol = 0;
rtm->rtm_scope = 0;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 7bff36095..b78f7ebaf 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -5,7 +5,7 @@
*
* IPv4 Forwarding Information Base: semantics.
*
- * Version: $Id: fib_semantics.c,v 1.11 1998/10/03 09:37:12 davem Exp $
+ * Version: $Id: fib_semantics.c,v 1.13 1999/03/21 05:22:34 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -89,7 +89,7 @@ static struct
{ -EINVAL, RT_SCOPE_UNIVERSE}, /* RTN_BLACKHOLE */
{ -EHOSTUNREACH, RT_SCOPE_UNIVERSE},/* RTN_UNREACHABLE */
{ -EACCES, RT_SCOPE_UNIVERSE}, /* RTN_PROHIBIT */
- { 1, RT_SCOPE_UNIVERSE}, /* RTN_THROW */
+ { -EAGAIN, RT_SCOPE_UNIVERSE}, /* RTN_THROW */
#ifdef CONFIG_IP_ROUTE_NAT
{ 0, RT_SCOPE_HOST}, /* RTN_NAT */
#else
@@ -420,7 +420,7 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
unsigned flavor = attr->rta_type;
if (flavor) {
if (flavor > FIB_MAX_METRICS)
- goto failure;
+ goto err_inval;
fi->fib_metrics[flavor-1] = *(unsigned*)RTA_DATA(attr);
}
attr = RTA_NEXT(attr, attrlen);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5ac2d9a53..199550ffb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -3,7 +3,7 @@
*
* Alan Cox, <alan@cymru.net>
*
- * Version: $Id: icmp.c,v 1.48 1999/01/02 16:51:41 davem Exp $
+ * Version: $Id: icmp.c,v 1.52 1999/03/21 12:04:11 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -279,6 +279,10 @@
#include <asm/uaccess.h>
#include <net/checksum.h>
+#ifdef CONFIG_IP_MASQUERADE
+#include <net/ip_masq.h>
+#endif
+
#define min(a,b) ((a)<(b)?(a):(b))
/*
@@ -369,6 +373,12 @@ struct socket *icmp_socket=&icmp_inode.u.socket_i;
* works for icmp destinations. This means the rate limiting information
* for one "ip object" is shared.
*
+ * Note that the same dst_entry fields are modified by functions in
+ * route.c too, but these work for packet destinations while xrlim_allow
+ * works for icmp destinations. This means the rate limiting information
+ * for one "ip object" is shared - and these ICMPs are twice limited:
+ * by source and by destination.
+ *
* RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
* SHOULD allow setting of rate limits
*
@@ -381,10 +391,10 @@ int xrlim_allow(struct dst_entry *dst, int timeout)
now = jiffies;
dst->rate_tokens += now - dst->rate_last;
+ dst->rate_last = now;
if (dst->rate_tokens > XRLIM_BURST_FACTOR*timeout)
dst->rate_tokens = XRLIM_BURST_FACTOR*timeout;
if (dst->rate_tokens >= timeout) {
- dst->rate_last = now;
dst->rate_tokens -= timeout;
return 1;
}
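
xrlim_allow() above is a per-destination token bucket: idle time accumulates as credit, capped at XRLIM_BURST_FACTOR times the timeout, and every emitted ICMP spends one timeout's worth of credit. The hunk also moves the rate_last update so that elapsed time is credited exactly once even while packets are being refused. A standalone sketch of the same scheme, with jiffies and the burst factor replaced by plain variables:

#include <stdio.h>

#define BURST_FACTOR	6	/* stands in for XRLIM_BURST_FACTOR */

struct bucket {
	long	tokens;		/* accumulated credit, in ticks */
	long	last;		/* tick of the previous call */
};

/* Return 1 if one event may be emitted now, 0 if it must be suppressed.
 * 'now' is a monotonically increasing tick counter (jiffies in the kernel),
 * 'timeout' is the cost of one event in ticks. */
static int rate_allow(struct bucket *b, long now, long timeout)
{
	b->tokens += now - b->last;
	b->last = now;			/* credit the elapsed time exactly once */
	if (b->tokens > BURST_FACTOR * timeout)
		b->tokens = BURST_FACTOR * timeout;
	if (b->tokens >= timeout) {
		b->tokens -= timeout;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct bucket b = { 0, 0 };
	long t;

	/* Allow one event per 10 ticks on average; attempt one every 4 ticks. */
	for (t = 4; t <= 60; t += 4)
		printf("t=%2ld %s\n", t, rate_allow(&b, t, 10) ? "sent" : "dropped");
	return 0;
}
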
@@ -402,6 +412,14 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
return 1;
+ /* Redirect has its own rate limit mechanism */
+ if (type == ICMP_REDIRECT)
+ return 1;
+
+ /* No rate limit on loopback */
+ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
+ return 1;
+
return xrlim_allow(dst, *(icmp_pointers[type].timeout));
}
@@ -518,8 +536,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, unsigned long info)
/*
* Now check at the protocol level
*/
- if (!rt)
+ if (!rt) {
+#ifndef CONFIG_IP_ALWAYS_DEFRAG
+ if (net_ratelimit())
+ printk(KERN_DEBUG "icmp_send: destinationless packet\n");
+#endif
return;
+ }
if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST))
return;
@@ -566,6 +589,11 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, unsigned long info)
iph->saddr = rt->key.src;
}
#endif
+#ifdef CONFIG_IP_MASQUERADE
+ if (type==ICMP_DEST_UNREACH && IPCB(skb_in)->flags&IPSKB_MASQUERADED) {
+ ip_fw_unmasq_icmp(skb_in);
+ }
+#endif
saddr = iph->daddr;
if (!(rt->rt_flags & RTCF_LOCAL))
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index b0e7b6d01..68e52633e 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -8,7 +8,7 @@
* the older version didn't come out right using gcc 2.5.8, the newer one
* seems to fall out with gcc 2.6.2.
*
- * Version: $Id: igmp.c,v 1.28 1998/11/30 15:53:13 davem Exp $
+ * Version: $Id: igmp.c,v 1.30 1999/03/25 10:04:10 davem Exp $
*
* Authors:
* Alan Cox <Alan.Cox@linux.org>
@@ -97,6 +97,7 @@
#include <linux/mroute.h>
#endif
+#define IP_MAX_MEMBERSHIPS 20
#ifdef CONFIG_IP_MULTICAST
@@ -463,6 +464,8 @@ int ip_mc_dec_group(struct in_device *in_dev, u32 addr)
if (i->multiaddr==addr) {
if (--i->users == 0) {
*ip = i->next;
+ synchronize_bh();
+
igmp_group_dropped(i);
if (in_dev->dev->flags & IFF_UP)
ip_rt_multicast_event(in_dev);
@@ -610,7 +613,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
struct in_device *in_dev;
if (--iml->count)
return 0;
+
*imlp = iml->next;
+ synchronize_bh();
+
in_dev = inetdev_by_index(iml->multi.imr_ifindex);
if (in_dev)
ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
@@ -684,6 +690,8 @@ done:
len-=(offset-begin);
if(len>length)
len=length;
+ if(len<0)
+ len=0;
return len;
}
#endif
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index b617bc343..08ebbc2f1 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -5,7 +5,7 @@
*
* The IP forwarding functionality.
*
- * Version: $Id: ip_forward.c,v 1.42 1998/10/03 09:37:19 davem Exp $
+ * Version: $Id: ip_forward.c,v 1.43 1999/03/21 05:22:37 davem Exp $
*
* Authors: see ip.c
*
@@ -260,7 +260,7 @@ skip_call_fw_firewall:
if (rt->rt_flags&RTCF_FAST && !netdev_fastroute_obstacles) {
unsigned h = ((*(u8*)&rt->key.dst)^(*(u8*)&rt->key.src))&NETDEV_FASTROUTE_HMASK;
/* Time to switch to functional programming :-) */
- dst_release(xchg(&skb->dev->fastpath[h], dst_clone(&rt->u.dst)));
+ dst_release_irqwait(xchg(&skb->dev->fastpath[h], dst_clone(&rt->u.dst)));
}
#endif
ip_send(skb);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 8a0e40f0f..f066e6073 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -5,7 +5,7 @@
*
* The IP fragmentation functionality.
*
- * Version: $Id: ip_fragment.c,v 1.39 1998/08/26 10:35:26 davem Exp $
+ * Version: $Id: ip_fragment.c,v 1.40 1999/03/20 23:58:34 davem Exp $
*
* Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox <Alan.Cox@linux.org>
@@ -17,6 +17,7 @@
* xxxx : Overlapfrag bug.
* Ultima : ip_expire() kernel panic.
* Bill Hawes : Frag accounting and evictor fixes.
+ * John McDonald : 0 length frag bug.
*/
#include <linux/types.h>
@@ -357,7 +358,7 @@ static struct sk_buff *ip_glue(struct ipq *qp)
fp = qp->fragments;
count = qp->ihlen;
while(fp) {
- if ((fp->len < 0) || ((count + fp->len) > skb->len))
+ if ((fp->len <= 0) || ((count + fp->len) > skb->len))
goto out_invalid;
memcpy((ptr + fp->offset), fp->ptr, fp->len);
if (count == qp->ihlen) {
diff --git a/net/ipv4/ip_fw.c b/net/ipv4/ip_fw.c
index cf2731df1..f3dbafc04 100644
--- a/net/ipv4/ip_fw.c
+++ b/net/ipv4/ip_fw.c
@@ -32,6 +32,8 @@
* 3-Jan-1999: Fixed serious procfs security hole -- users should never
* be allowed to view the chains!
* Marc Santoro <ultima@snicker.emoti.com>
+ * 29-Jan-1999: Locally generated bogus IPs dealt with, rather than crash
+ * during dump_packet. --RR.
*/
/*
@@ -1660,6 +1662,10 @@ int ipfw_input_check(struct firewall_ops *this, int pf, struct device *dev,
int ipfw_output_check(struct firewall_ops *this, int pf, struct device *dev,
void *phdr, void *arg, struct sk_buff **pskb)
{
+ /* Locally generated bogus packets by root. <SIGH>. */
+ if (((struct iphdr *)phdr)->ihl * 4 < sizeof(struct iphdr)
+ || (*pskb)->len < sizeof(struct iphdr))
+ return FW_ACCEPT;
return ip_fw_check(phdr, dev->name,
arg, IP_FW_OUTPUT_CHAIN, *pskb, SLOT_NUMBER(), 0);
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 6488e9d70..6a7546fd5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -189,6 +189,46 @@ static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key)
return NULL;
}
+static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
+{
+ u32 remote = t->parms.iph.daddr;
+ u32 local = t->parms.iph.saddr;
+ u32 key = t->parms.i_key;
+ unsigned h = HASH(key);
+ int prio = 0;
+
+ if (local)
+ prio |= 1;
+ if (remote && !MULTICAST(remote)) {
+ prio |= 2;
+ h ^= HASH(remote);
+ }
+
+ return &tunnels[prio][h];
+}
+
+static void ipgre_tunnel_link(struct ip_tunnel *t)
+{
+ struct ip_tunnel **tp = ipgre_bucket(t);
+
+ t->next = *tp;
+ wmb();
+ *tp = t;
+}
+
+static void ipgre_tunnel_unlink(struct ip_tunnel *t)
+{
+ struct ip_tunnel **tp;
+
+ for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
+ if (t == *tp) {
+ *tp = t->next;
+ synchronize_bh();
+ break;
+ }
+ }
+}
+
static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
u32 remote = parms->iph.daddr;
@@ -241,10 +281,7 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
if (register_netdevice(dev) < 0)
goto failed;
- start_bh_atomic();
- nt->next = t;
- *tp = nt;
- end_bh_atomic();
+ ipgre_tunnel_link(nt);
/* Do not decrement MOD_USE_COUNT here. */
return nt;
@@ -256,28 +293,11 @@ failed:
static void ipgre_tunnel_destroy(struct device *dev)
{
- struct ip_tunnel *t, **tp;
- struct ip_tunnel *t0 = (struct ip_tunnel*)dev->priv;
- u32 remote = t0->parms.iph.daddr;
- u32 local = t0->parms.iph.saddr;
- unsigned h = HASH(t0->parms.i_key);
- int prio = 0;
+ ipgre_tunnel_unlink((struct ip_tunnel*)dev->priv);
- if (local)
- prio |= 1;
- if (remote && !MULTICAST(remote)) {
- prio |= 2;
- h ^= HASH(remote);
- }
- for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
- if (t == t0) {
- *tp = t->next;
- if (dev != &ipgre_fb_tunnel_dev) {
- kfree(dev);
- MOD_DEC_USE_COUNT;
- }
- break;
- }
+ if (dev != &ipgre_fb_tunnel_dev) {
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
}
}
@@ -849,6 +869,41 @@ ipgre_tunnel_ioctl (struct device *dev, struct ifreq *ifr, int cmd)
t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
+ if (dev != &ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL &&
+ t != &ipgre_fb_tunnel) {
+ if (t != NULL) {
+ if (t->dev != dev) {
+ err = -EEXIST;
+ break;
+ }
+ } else {
+ unsigned nflags=0;
+
+ t = (struct ip_tunnel*)dev->priv;
+
+ if (MULTICAST(p.iph.daddr))
+ nflags = IFF_BROADCAST;
+ else if (p.iph.daddr)
+ nflags = IFF_POINTOPOINT;
+
+ if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
+ err = -EINVAL;
+ break;
+ }
+ start_bh_atomic();
+ ipgre_tunnel_unlink(t);
+ t->parms.iph.saddr = p.iph.saddr;
+ t->parms.iph.daddr = p.iph.daddr;
+ t->parms.i_key = p.i_key;
+ t->parms.o_key = p.o_key;
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ ipgre_tunnel_link(t);
+ end_bh_atomic();
+ netdev_state_change(dev);
+ }
+ }
+
if (t) {
err = 0;
if (cmd == SIOCCHGTUNNEL) {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index fbbfbbfc6..7a3e2618b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -5,7 +5,7 @@
*
* The Internet Protocol (IP) module.
*
- * Version: $Id: ip_input.c,v 1.35 1999/01/12 14:32:48 davem Exp $
+ * Version: $Id: ip_input.c,v 1.37 1999/04/22 10:38:36 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -253,7 +253,19 @@ int ip_local_deliver(struct sk_buff *skb)
* Do we need to de-masquerade this packet?
*/
{
- int ret = ip_fw_demasquerade(&skb);
+ int ret;
+ /*
+		 *	Some masq modules can re-inject packets if they are
+		 *	badly configured.
+ */
+
+ if((IPCB(skb)->flags&IPSKB_MASQUERADED)) {
+ printk(KERN_DEBUG "ip_input(): demasq recursion detected. Check masq modules configuration\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ ret = ip_fw_demasquerade(&skb);
if (ret < 0) {
kfree_skb(skb);
return 0;
@@ -387,6 +399,10 @@ int ip_local_deliver(struct sk_buff *skb)
int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
struct iphdr *iph = skb->nh.iph;
+#ifdef CONFIG_FIREWALL
+ int fwres;
+ u16 rport;
+#endif /* CONFIG_FIREWALL */
/*
* When the interface is in promisc. mode, drop all the crap
@@ -427,6 +443,30 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
__skb_trim(skb, len);
}
+#ifdef CONFIG_IP_ALWAYS_DEFRAG
+ /* Won't send ICMP reply, since skb->dst == NULL. --RR */
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+ skb = ip_defrag(skb);
+ if (!skb)
+ return 0;
+ iph = skb->nh.iph;
+ ip_send_check(iph);
+ }
+#endif
+
+#ifdef CONFIG_FIREWALL
+ /*
+ * See if the firewall wants to dispose of the packet.
+ *
+ * We can't do ICMP reply or local delivery before routing,
+ * so we delay those decisions until after route. --RR
+ */
+ fwres = call_in_firewall(PF_INET, dev, iph, &rport, &skb);
+ if (fwres < FW_ACCEPT && fwres != FW_REJECT)
+ goto drop;
+ iph = skb->nh.iph;
+#endif /* CONFIG_FIREWALL */
+
/*
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
@@ -442,13 +482,13 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
#endif
}
-#ifdef CONFIG_IP_ALWAYS_DEFRAG
- if (iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- skb = ip_defrag(skb);
- if (!skb)
- return 0;
- iph = skb->nh.iph;
- ip_send_check(iph);
+#ifdef CONFIG_NET_CLS_ROUTE
+ if (skb->dst->tclassid) {
+ u32 idx = skb->dst->tclassid;
+ ip_rt_acct[idx&0xFF].o_packets++;
+ ip_rt_acct[idx&0xFF].o_bytes+=skb->len;
+ ip_rt_acct[(idx>>16)&0xFF].i_packets++;
+ ip_rt_acct[(idx>>16)&0xFF].i_bytes+=skb->len;
}
#endif
@@ -462,7 +502,7 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
and running sniffer is extremely rare condition.
--ANK (980813)
*/
-
+
skb = skb_cow(skb, skb_headroom(skb));
if (skb == NULL)
return 0;
@@ -486,51 +526,17 @@ int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
}
}
- /*
- * See if the firewall wants to dispose of the packet.
- *
- * Note: the current standard firewall code expects that the
- * destination address was already checked against the interface
- * address lists.
- *
- * If this code is ever moved in front of ip_route_input() you need
- * to fix the fw code [moving it might be a good idea anyways,
- * so that we can firewall against potentially bugs in the options
- * or routing code]
- */
-
-#ifdef CONFIG_FIREWALL
- {
- int fwres;
- u16 rport;
-#ifdef CONFIG_IP_ROUTE_TOS
- u8 tos = iph->tos;
-#endif
-
- if ((fwres=call_in_firewall(PF_INET, skb->dev, iph, &rport, &skb))<FW_ACCEPT) {
- if (fwres==FW_REJECT)
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
- goto drop;
- }
-
+#ifdef CONFIG_FIREWALL
#ifdef CONFIG_IP_TRANSPARENT_PROXY
- if (fwres==FW_REDIRECT && (IPCB(skb)->redirport = rport) != 0)
- return ip_local_deliver(skb);
-#endif
-#ifdef CONFIG_IP_ROUTE_TOS
- /* It is for 2.2 only. Firewalling should make smart
- rerouting itself, ideally, but now it is too late
- to teach it. --ANK (980905)
- */
- if (iph->tos != tos && ((struct rtable*)skb->dst)->rt_type == RTN_UNICAST) {
- dst_release(skb->dst);
- skb->dst = NULL;
- if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))
- goto drop;
- }
-#endif
+ if (fwres == FW_REDIRECT && (IPCB(skb)->redirport = rport) != 0)
+ return ip_local_deliver(skb);
+#endif /* CONFIG_IP_TRANSPARENT_PROXY */
+
+ if (fwres == FW_REJECT) {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+ goto drop;
}
-#endif
+#endif /* CONFIG_FIREWALL */
return skb->dst->input(skb);
diff --git a/net/ipv4/ip_masq.c b/net/ipv4/ip_masq.c
index 154e70686..cf02f9364 100644
--- a/net/ipv4/ip_masq.c
+++ b/net/ipv4/ip_masq.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 1994 Pauline Middelink
*
- * $Id: ip_masq.c,v 1.33 1999/01/15 06:45:17 davem Exp $
+ * $Id: ip_masq.c,v 1.34 1999/03/17 01:53:51 davem Exp $
*
*
* See ip_fw.c for original log
@@ -1294,7 +1294,79 @@ int ip_fw_masquerade(struct sk_buff **skb_p, __u32 maddr)
return 0;
}
+/*
+ * Restore original addresses and ports in the original IP
+ * datagram if the failing packet has been [de]masqueraded.
+ * This is ugly in the extreme. We no longer have the original
+ * packet so we have to reconstruct it from the failing packet
+ * plus data in the masq tables. The resulting "original data"
+ * should be good enough to tell the sender which session to
+ * throttle. Relies on far too much knowledge of masq internals,
+ * there ought to be a better way - KAO 990303.
+ *
+ * Moved here from icmp.c - JJC.
+ * Already known: type == ICMP_DEST_UNREACH, IPSKB_MASQUERADED
+ * skb->nh.iph points to original header.
+ *
+ * Must try both OUT and IN tables; we could add a flag
+ *	a la IPSKB_MASQUERADED to avoid a second table lookup, but this is VERY
+ *	unlikely because routing makes the mtu decision before reaching
+ * ip_fw_masquerade().
+ *
+ */
+int ip_fw_unmasq_icmp(struct sk_buff *skb) {
+ struct ip_masq *ms;
+ struct iphdr *iph = skb->nh.iph;
+ __u16 *portp = (__u16 *)&(((char *)iph)[iph->ihl*4]);
+
+ /*
+ * Always called from _bh context: use read_[un]lock()
+ */
+
+ /*
+ * Peek "out" table, this packet has bounced:
+ * out->in(frag_needed!)->OUT[icmp]
+ *
+ * iph->daddr is IN host
+ * iph->saddr is OUT host
+ */
+ read_lock(&__ip_masq_lock);
+ ms = __ip_masq_out_get(iph->protocol,
+ iph->daddr, portp[1],
+ iph->saddr, portp[0]);
+ read_unlock(&__ip_masq_lock);
+ if (ms) {
+		IP_MASQ_DEBUG(1, "Incoming frag_need rewritten from %d.%d.%d.%d to %d.%d.%d.%d\n",
+ NIPQUAD(iph->daddr), NIPQUAD(ms->maddr));
+ iph->daddr = ms->maddr;
+ portp[1] = ms->mport;
+ __ip_masq_put(ms);
+ return 1;
+ }
+ /*
+ * Peek "in" table
+ * in->out(frag_needed!)->IN[icmp]
+ *
+ * iph->daddr is OUT host
+ * iph->saddr is MASQ host
+ *
+ */
+ read_lock(&__ip_masq_lock);
+ ms = __ip_masq_in_get(iph->protocol,
+ iph->daddr, portp[1],
+ iph->saddr, portp[0]);
+ read_unlock(&__ip_masq_lock);
+ if (ms) {
+		IP_MASQ_DEBUG(1, "Outgoing frag_need rewritten from %d.%d.%d.%d to %d.%d.%d.%d\n",
+ NIPQUAD(iph->saddr), NIPQUAD(ms->saddr));
+ iph->saddr = ms->saddr;
+ portp[0] = ms->sport;
+ __ip_masq_put(ms);
+ return 1;
+ }
+ return 0;
+}
/*
* Handle ICMP messages in forward direction.
* Find any that might be relevant, check against existing connections,
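
ip_fw_unmasq_icmp() above works on the copy of the offending datagram that an ICMP error carries: the embedded IP header plus at least the first 8 bytes of its payload, which is where the UDP/TCP port pair sits. A userspace-flavoured sketch of locating those fields with the standard netinet headers (the kernel reaches the same data through skb->nh.iph rather than a raw buffer):

#include <stddef.h>
#include <stdint.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>

/* Given an ICMP error message and its length, return the embedded
 * (original) IP header and the two 16-bit ports that follow it --
 * the fields ip_fw_unmasq_icmp() rewrites. */
const struct iphdr *icmp_embedded_iphdr(const uint8_t *icmp, size_t len,
					 const uint16_t **ports)
{
	const struct icmphdr *icmph = (const struct icmphdr *)icmp;
	const struct iphdr *inner;
	size_t ihl;

	if (len < sizeof(*icmph) + sizeof(*inner))
		return NULL;
	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		return NULL;				/* no embedded datagram */

	inner = (const struct iphdr *)(icmp + sizeof(*icmph));
	ihl = inner->ihl * 4;
	if (ihl < sizeof(*inner) || len < sizeof(*icmph) + ihl + 8)
		return NULL;				/* ports not present */

	/* First 8 bytes after the inner header: source port, then dest port. */
	*ports = (const uint16_t *)((const uint8_t *)inner + ihl);
	return inner;
}
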
diff --git a/net/ipv4/ip_masq_mfw.c b/net/ipv4/ip_masq_mfw.c
index e3903c0cb..dc38b1712 100644
--- a/net/ipv4/ip_masq_mfw.c
+++ b/net/ipv4/ip_masq_mfw.c
@@ -3,7 +3,7 @@
*
* Does (reverse-masq) forwarding based on skb->fwmark value
*
- * $Id: ip_masq_mfw.c,v 1.2 1998/12/12 02:40:42 davem Exp $
+ * $Id: ip_masq_mfw.c,v 1.3 1999/01/26 05:33:47 davem Exp $
*
* Author: Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>
* based on Steven Clarke's portfw
diff --git a/net/ipv4/ip_nat_dumb.c b/net/ipv4/ip_nat_dumb.c
index 9f9966b34..5a1c6d753 100644
--- a/net/ipv4/ip_nat_dumb.c
+++ b/net/ipv4/ip_nat_dumb.c
@@ -5,7 +5,7 @@
*
* Dumb Network Address Translation.
*
- * Version: $Id: ip_nat_dumb.c,v 1.7 1998/10/06 04:49:09 davem Exp $
+ * Version: $Id: ip_nat_dumb.c,v 1.8 1999/03/21 05:22:40 davem Exp $
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
@@ -89,6 +89,8 @@ ip_do_nat(struct sk_buff *skb)
{
struct icmphdr *icmph = (struct icmphdr*)((char*)iph + (iph->ihl<<2));
struct iphdr *ciph;
+ u32 idaddr, isaddr;
+ int updated;
if ((icmph->type != ICMP_DEST_UNREACH) &&
(icmph->type != ICMP_TIME_EXCEEDED) &&
@@ -100,8 +102,14 @@ ip_do_nat(struct sk_buff *skb)
if ((u8*)(ciph+1) > skb->tail)
goto truncated;
- if (rt->rt_flags&RTCF_DNAT && ciph->saddr == odaddr)
+ isaddr = ciph->saddr;
+ idaddr = ciph->daddr;
+ updated = 0;
+
+ if (rt->rt_flags&RTCF_DNAT && ciph->saddr == odaddr) {
ciph->saddr = iph->daddr;
+ updated = 1;
+ }
if (rt->rt_flags&RTCF_SNAT) {
if (ciph->daddr != osaddr) {
struct fib_result res;
@@ -115,16 +123,27 @@ ip_do_nat(struct sk_buff *skb)
#ifdef CONFIG_IP_ROUTE_TOS
key.tos = RT_TOS(ciph->tos);
#endif
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ key.fwmark = 0;
+#endif
/* Use fib_lookup() until we get our own
* hash table of NATed hosts -- Rani
*/
- if (fib_lookup(&key, &res) != 0)
- return 0;
- if (res.r)
+ if (fib_lookup(&key, &res) == 0 && res.r) {
ciph->daddr = fib_rules_policy(ciph->daddr, &res, &flags);
- }
- else
+ if (ciph->daddr != idaddr)
+ updated = 1;
+ }
+ } else {
ciph->daddr = iph->saddr;
+ updated = 1;
+ }
+ }
+ if (updated) {
+ cksum = &icmph->checksum;
+ /* Using tcpudp primitive. Why not? */
+ check = csum_tcpudp_magic(ciph->saddr, ciph->daddr, 0, 0, ~(*cksum));
+ *cksum = csum_tcpudp_magic(~isaddr, ~idaddr, 0, 0, ~check);
}
break;
}
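
The icmph->checksum fix-up above is an incremental checksum update: instead of recomputing the sum over the whole message, only the difference between the old and the new embedded addresses is folded in, which is what the csum_tcpudp_magic() trick accomplishes. A minimal per-16-bit-word version of the same arithmetic (RFC 1624, equation 3; the values in main() are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* RFC 1624, eq. 3:  HC' = ~(~HC + ~m + m')
 * HC is the old checksum, m the old field value, m' the new one. */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new_val;
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* A 32-bit address change is just two 16-bit updates. */
static uint16_t csum_update32(uint16_t check, uint32_t old, uint32_t new_val)
{
	check = csum_update16(check, old >> 16, new_val >> 16);
	return csum_update16(check, old & 0xffff, new_val & 0xffff);
}

int main(void)
{
	printf("0x%04x\n", csum_update32(0x1c46, 0xc0a80001, 0xc0a80002));
	return 0;
}
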
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 92502239c..fae22cbe7 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -5,7 +5,7 @@
*
* The options processing module for ip.c
*
- * Version: $Id: ip_options.c,v 1.15 1998/10/03 09:37:27 davem Exp $
+ * Version: $Id: ip_options.c,v 1.16 1999/03/21 05:22:40 davem Exp $
*
* Authors: A.N.Kuznetsov
*
@@ -137,17 +137,17 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
if (sopt->ts_needtime) {
if (soffset + 3 > optlen)
return -EINVAL;
- dopt->ts_needtime = 1;
- soffset += 4;
- if ((dptr[3]&0xF) == IPOPT_TS_PRESPEC) {
- __u32 addr;
- if (soffset + 3 > optlen)
- return -EINVAL;
+ if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) {
+ dopt->ts_needtime = 1;
soffset += 4;
+ } else {
+ dopt->ts_needtime = 0;
+
if (soffset + 8 <= optlen) {
- dopt->ts_needtime = 0;
+ __u32 addr;
+
memcpy(&addr, sptr+soffset-1, 4);
- if (inet_addr_type(addr) != RTN_UNICAST) {
+ if (inet_addr_type(addr) != RTN_LOCAL) {
dopt->ts_needtime = 1;
soffset += 8;
}
@@ -471,19 +471,21 @@ void ip_options_undo(struct ip_options * opt)
}
if (opt->rr_needaddr) {
unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr);
- memset(&optptr[optptr[2]-1], 0, 4);
optptr[2] -= 4;
+ memset(&optptr[optptr[2]-1], 0, 4);
}
if (opt->ts) {
unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr);
if (opt->ts_needtime) {
- memset(&optptr[optptr[2]-1], 0, 4);
optptr[2] -= 4;
- }
- if (opt->ts_needaddr)
memset(&optptr[optptr[2]-1], 0, 4);
- if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC)
+ if ((optptr[3]&0xF) == IPOPT_TS_PRESPEC)
+ optptr[2] -= 4;
+ }
+ if (opt->ts_needaddr) {
optptr[2] -= 4;
+ memset(&optptr[optptr[2]-1], 0, 4);
+ }
}
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ce027c374..44d635573 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -5,7 +5,7 @@
*
* The Internet Protocol (IP) output module.
*
- * Version: $Id: ip_output.c,v 1.64 1999/01/04 20:05:33 davem Exp $
+ * Version: $Id: ip_output.c,v 1.67 1999/03/25 00:43:00 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -36,8 +36,7 @@
* for decreased register pressure on x86
* and more readibility.
* Marc Boucher : When call_out_firewall returns FW_QUEUE,
- * silently abort send instead of failing
- * with -EPERM.
+ * silently drop skb instead of failing with -EPERM.
*/
#include <asm/uaccess.h>
@@ -132,8 +131,16 @@ void ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
dev = rt->u.dst.dev;
#ifdef CONFIG_FIREWALL
- if (call_out_firewall(PF_INET, dev, iph, NULL, &skb) < FW_ACCEPT)
- goto drop;
+ /* Now we have no better mechanism to notify about error. */
+ switch (call_out_firewall(PF_INET, dev, iph, NULL, &skb)) {
+ case FW_REJECT:
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+ /* Fall thru... */
+ case FW_BLOCK:
+ case FW_QUEUE:
+ kfree_skb(skb);
+ return;
+ }
#endif
ip_send_check(iph);
@@ -141,11 +148,6 @@ void ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
/* Send it out. */
skb->dst->output(skb);
return;
-
-#ifdef CONFIG_FIREWALL
-drop:
- kfree_skb(skb);
-#endif
}
int __ip_finish_output(struct sk_buff *skb)
@@ -292,8 +294,17 @@ void ip_queue_xmit(struct sk_buff *skb)
dev = rt->u.dst.dev;
#ifdef CONFIG_FIREWALL
- if (call_out_firewall(PF_INET, dev, iph, NULL, &skb) < FW_ACCEPT)
- goto drop;
+ /* Now we have no better mechanism to notify about error. */
+ switch (call_out_firewall(PF_INET, dev, iph, NULL, &skb)) {
+ case FW_REJECT:
+ start_bh_atomic();
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+ end_bh_atomic();
+ /* Fall thru... */
+ case FW_BLOCK:
+ case FW_QUEUE:
+ goto drop;
+ }
#endif
/* This can happen when the transport layer has segments queued
@@ -339,9 +350,13 @@ fragment:
into account). Actually, tcp should make it. --ANK (980801)
*/
iph->frag_off |= __constant_htons(IP_DF);
- printk(KERN_DEBUG "sending pkt_too_big to self\n");
+ NETDEBUG(printk(KERN_DEBUG "sending pkt_too_big to self\n"));
+
+	/* icmp_send is not reentrant, hence the bh_atomic... --ANK */
+ start_bh_atomic();
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(rt->u.dst.pmtu));
+ end_bh_atomic();
goto drop;
}
ip_fragment(skb, skb->dst->output);
@@ -402,14 +417,13 @@ int ip_build_xmit_slow(struct sock *sk,
if (ip_dont_fragment(sk, &rt->u.dst))
df = htons(IP_DF);
- if (!sk->ip_hdrincl)
- length -= sizeof(struct iphdr);
+ length -= sizeof(struct iphdr);
if (opt) {
fragheaderlen = sizeof(struct iphdr) + opt->optlen;
maxfraglen = ((mtu-sizeof(struct iphdr)-opt->optlen) & ~7) + fragheaderlen;
} else {
- fragheaderlen = sk->ip_hdrincl ? 0 : sizeof(struct iphdr);
+ fragheaderlen = sizeof(struct iphdr);
/*
* Fragheaderlen is the size of 'overhead' on each buffer. Now work
@@ -474,7 +488,6 @@ int ip_build_xmit_slow(struct sock *sk,
*/
do {
- int error;
char *data;
struct sk_buff * skb;
@@ -482,15 +495,10 @@ int ip_build_xmit_slow(struct sock *sk,
* Get the memory we require with some space left for alignment.
*/
- skb = sock_alloc_send_skb(sk, fraglen+hh_len+15, 0, flags&MSG_DONTWAIT, &error);
- if (skb == NULL) {
- ip_statistics.IpOutDiscards++;
- if(nfrags>1)
- ip_statistics.IpFragCreates++;
- dev_unlock_list();
- return(error);
- }
-
+ skb = sock_alloc_send_skb(sk, fraglen+hh_len+15, 0, flags&MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ goto error;
+
/*
* Fill in the control structures
*/
@@ -510,7 +518,7 @@ int ip_build_xmit_slow(struct sock *sk,
* Only write IP header onto non-raw packets
*/
- if(!sk->ip_hdrincl) {
+ {
struct iphdr *iph = (struct iphdr *)data;
iph->version = 4;
@@ -547,53 +555,46 @@ int ip_build_xmit_slow(struct sock *sk,
* User data callback
*/
- err = 0;
- if (getfrag(frag, data, offset, fraglen-fragheaderlen))
+ if (getfrag(frag, data, offset, fraglen-fragheaderlen)) {
err = -EFAULT;
-
- /*
- * Account for the fragment.
- */
-
-#ifdef CONFIG_FIREWALL
- if(!err) {
- int fw_res;
-
- fw_res = call_out_firewall(PF_INET, rt->u.dst.dev, skb->nh.iph, NULL, &skb);
- if(fw_res == FW_QUEUE) {
- kfree_skb(skb);
- skb = NULL;
- } else if(fw_res < FW_ACCEPT) {
- err = -EPERM;
- }
- }
-#endif
-
- if (err) {
- ip_statistics.IpOutDiscards++;
kfree_skb(skb);
- dev_unlock_list();
- return err;
+ goto error;
}
-
offset -= (maxfraglen-fragheaderlen);
fraglen = maxfraglen;
nfrags++;
- err = 0;
- if (skb && rt->u.dst.output(skb)) {
- err = -ENETDOWN;
- ip_statistics.IpOutDiscards++;
- break;
+#ifdef CONFIG_FIREWALL
+ switch (call_out_firewall(PF_INET, rt->u.dst.dev, skb->nh.iph, NULL, &skb)) {
+ case FW_QUEUE:
+ kfree_skb(skb);
+ continue;
+ case FW_BLOCK:
+ case FW_REJECT:
+ kfree_skb(skb);
+ err = -EPERM;
+ goto error;
}
+#endif
+
+ err = -ENETDOWN;
+ if (rt->u.dst.output(skb))
+ goto error;
} while (offset >= 0);
if (nfrags>1)
ip_statistics.IpFragCreates += nfrags;
dev_unlock_list();
- return err;
+ return 0;
+
+error:
+ ip_statistics.IpOutDiscards++;
+ if (nfrags>1)
+ ip_statistics.IpFragCreates += nfrags;
+ dev_unlock_list();
+ return err;
}
@@ -621,14 +622,20 @@ int ip_build_xmit(struct sock *sk,
* choice RAW frames within 20 bytes of maximum size(rare) to the long path
*/
- if (!sk->ip_hdrincl)
+ if (!sk->ip_hdrincl) {
length += sizeof(struct iphdr);
- /*
- * Check for slow path.
- */
- if (length > rt->u.dst.pmtu || ipc->opt != NULL)
- return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
+ /*
+ * Check for slow path.
+ */
+ if (length > rt->u.dst.pmtu || ipc->opt != NULL)
+ return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
+ } else {
+ if (length > rt->u.dst.dev->mtu) {
+ ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, rt->u.dst.dev->mtu);
+ return -EMSGSIZE;
+ }
+ }
/*
* Do path mtu discovery if needed.
@@ -636,7 +643,7 @@ int ip_build_xmit(struct sock *sk,
df = 0;
if (ip_dont_fragment(sk, &rt->u.dst))
df = htons(IP_DF);
-
+
/*
* Fast path for unfragmented frames without options.
*/
@@ -679,31 +686,27 @@ int ip_build_xmit(struct sock *sk,
dev_unlock_list();
- if (err)
- err = -EFAULT;
+ if (err)
+ goto error_fault;
#ifdef CONFIG_FIREWALL
- if(!err) {
- int fw_res;
-
- fw_res = call_out_firewall(PF_INET, rt->u.dst.dev, iph, NULL, &skb);
- if(fw_res == FW_QUEUE) {
- /* re-queued elsewhere; silently abort this send */
- kfree_skb(skb);
- return 0;
- }
- if(fw_res < FW_ACCEPT)
- err = -EPERM;
- }
-#endif
-
- if (err) {
+ switch (call_out_firewall(PF_INET, rt->u.dst.dev, iph, NULL, &skb)) {
+ case FW_QUEUE:
kfree_skb(skb);
+ return 0;
+ case FW_BLOCK:
+ case FW_REJECT:
+ kfree_skb(skb);
+ err = -EPERM;
goto error;
}
-
+#endif
+
return rt->u.dst.output(skb);
+error_fault:
+ err = -EFAULT;
+ kfree_skb(skb);
error:
ip_statistics.IpOutDiscards++;
return err;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 1391cbd24..369a6770c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -5,7 +5,7 @@
*
* The IP to API glue.
*
- * Version: $Id: ip_sockglue.c,v 1.39 1998/10/03 09:37:33 davem Exp $
+ * Version: $Id: ip_sockglue.c,v 1.42 1999/04/22 10:07:34 davem Exp $
*
* Authors: see ip.c
*
@@ -150,7 +150,8 @@ int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc)
struct cmsghdr *cmsg;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
- if ((unsigned long)(((char*)cmsg - (char*)msg->msg_control)
+ if (cmsg->cmsg_len < sizeof(struct cmsghdr) ||
+ (unsigned long)(((char*)cmsg - (char*)msg->msg_control)
+ cmsg->cmsg_len) > msg->msg_controllen) {
return -EINVAL;
}
@@ -210,6 +211,8 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
return -EADDRINUSE;
}
*rap = ra->next;
+ synchronize_bh();
+
if (ra->destructor)
ra->destructor(sk);
kfree(ra);
@@ -220,10 +223,11 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s
return -ENOBUFS;
new_ra->sk = sk;
new_ra->destructor = destructor;
- start_bh_atomic();
+
new_ra->next = ra;
+ wmb();
*rap = new_ra;
- end_bh_atomic();
+
return 0;
}
@@ -404,7 +408,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
err = ip_options_get(&opt, optval, optlen, 1);
if (err)
return err;
- start_bh_atomic();
+ lock_sock(sk);
if (sk->type == SOCK_STREAM) {
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -420,7 +424,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
#endif
}
opt = xchg(&sk->opt, opt);
- end_bh_atomic();
+ release_sock(sk);
if (opt)
kfree_s(opt, sizeof(struct ip_options) + opt->optlen);
return 0;
@@ -463,11 +467,12 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int opt
!capable(CAP_NET_ADMIN))
return -EPERM;
if (sk->ip_tos != val) {
+ lock_sock(sk);
sk->ip_tos=val;
sk->priority = rt_tos2priority(val);
dst_release(xchg(&sk->dst_cache, NULL));
+ release_sock(sk);
}
- sk->priority = rt_tos2priority(val);
return 0;
case IP_TTL:
if (optlen<1)
@@ -637,11 +642,11 @@ int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *op
{
unsigned char optbuf[sizeof(struct ip_options)+40];
struct ip_options * opt = (struct ip_options*)optbuf;
- start_bh_atomic();
+ lock_sock(sk);
opt->optlen = 0;
if (sk->opt)
memcpy(optbuf, sk->opt, sizeof(struct ip_options)+sk->opt->optlen);
- end_bh_atomic();
+ release_sock(sk);
if (opt->optlen == 0)
return put_user(0, optlen);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 94e64eec6..abe93ec27 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1,5 +1,5 @@
/*
- * $Id: ipconfig.c,v 1.19 1999/01/15 06:54:00 davem Exp $
+ * $Id: ipconfig.c,v 1.20 1999/03/28 10:18:28 davem Exp $
*
* Automatic Configuration of IP -- use BOOTP or RARP or user-supplied
* information to configure own IP address and routes.
@@ -825,9 +825,9 @@ int __init ip_auto_config(void)
*/
if (ic_myaddr == INADDR_NONE ||
#ifdef CONFIG_ROOT_NFS
- root_server_addr == INADDR_NONE ||
+ (root_server_addr == INADDR_NONE && ic_servaddr == INADDR_NONE) ||
#endif
- (ic_first_dev && ic_first_dev->next)) {
+ ic_first_dev->next) {
#ifdef CONFIG_IP_PNP_DYNAMIC
if (ic_dynamic() < 0) {
printk(KERN_ERR "IP-Config: Auto-configuration of network failed.\n");
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9175e6fe6..0aeef4a31 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -1,7 +1,7 @@
/*
* Linux NET3: IP/IP protocol decoder.
*
- * Version: $Id: ipip.c,v 1.24 1998/10/03 09:37:35 davem Exp $
+ * Version: $Id: ipip.c,v 1.26 1999/03/25 10:04:32 davem Exp $
*
* Authors:
* Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
@@ -157,6 +157,47 @@ static struct ip_tunnel * ipip_tunnel_lookup(u32 remote, u32 local)
return NULL;
}
+static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t)
+{
+ u32 remote = t->parms.iph.daddr;
+ u32 local = t->parms.iph.saddr;
+ unsigned h = 0;
+ int prio = 0;
+
+ if (remote) {
+ prio |= 2;
+ h ^= HASH(remote);
+ }
+ if (local) {
+ prio |= 1;
+ h ^= HASH(local);
+ }
+ return &tunnels[prio][h];
+}
+
+
+static void ipip_tunnel_unlink(struct ip_tunnel *t)
+{
+ struct ip_tunnel **tp;
+
+ for (tp = ipip_bucket(t); *tp; tp = &(*tp)->next) {
+ if (t == *tp) {
+ *tp = t->next;
+ synchronize_bh();
+ break;
+ }
+ }
+}
+
+static void ipip_tunnel_link(struct ip_tunnel *t)
+{
+ struct ip_tunnel **tp = ipip_bucket(t);
+
+ t->next = *tp;
+ wmb();
+ *tp = t;
+}
+
struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
u32 remote = parms->iph.daddr;
@@ -208,10 +249,7 @@ struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create)
if (register_netdevice(dev) < 0)
goto failed;
- start_bh_atomic();
- nt->next = t;
- *tp = nt;
- end_bh_atomic();
+ ipip_tunnel_link(nt);
/* Do not decrement MOD_USE_COUNT here. */
return nt;
@@ -221,39 +259,19 @@ failed:
return NULL;
}
+
static void ipip_tunnel_destroy(struct device *dev)
{
- struct ip_tunnel *t, **tp;
- struct ip_tunnel *t0 = (struct ip_tunnel*)dev->priv;
- u32 remote = t0->parms.iph.daddr;
- u32 local = t0->parms.iph.saddr;
- unsigned h = 0;
- int prio = 0;
-
if (dev == &ipip_fb_tunnel_dev) {
tunnels_wc[0] = NULL;
- return;
- }
-
- if (remote) {
- prio |= 2;
- h ^= HASH(remote);
- }
- if (local) {
- prio |= 1;
- h ^= HASH(local);
- }
- for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
- if (t == t0) {
- *tp = t->next;
- kfree(dev);
- MOD_DEC_USE_COUNT;
- break;
- }
+ synchronize_bh();
+ } else {
+ ipip_tunnel_unlink((struct ip_tunnel*)dev->priv);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
}
}
-
void ipip_err(struct sk_buff *skb, unsigned char *dp, int len)
{
#ifndef I_WISH_WORLD_WERE_PERFECT
@@ -642,6 +660,32 @@ ipip_tunnel_ioctl (struct device *dev, struct ifreq *ifr, int cmd)
t = ipip_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
+ if (dev != &ipip_fb_tunnel_dev && cmd == SIOCCHGTUNNEL &&
+ t != &ipip_fb_tunnel) {
+ if (t != NULL) {
+ if (t->dev != dev) {
+ err = -EEXIST;
+ break;
+ }
+ } else {
+ if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
+ (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
+ err = -EINVAL;
+ break;
+ }
+ t = (struct ip_tunnel*)dev->priv;
+ start_bh_atomic();
+ ipip_tunnel_unlink(t);
+ t->parms.iph.saddr = p.iph.saddr;
+ t->parms.iph.daddr = p.iph.daddr;
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ ipip_tunnel_link(t);
+ end_bh_atomic();
+ netdev_state_change(dev);
+ }
+ }
+
if (t) {
err = 0;
if (cmd == SIOCCHGTUNNEL) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 99cda3ea0..d7db0c007 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: ipmr.c,v 1.38 1999/01/12 14:34:40 davem Exp $
+ * Version: $Id: ipmr.c,v 1.40 1999/03/25 10:04:25 davem Exp $
*
* Fixes:
* Michael Chastain : Incorrect size of copying.
@@ -138,6 +138,8 @@ static struct device * reg_dev;
static int reg_vif_xmit(struct sk_buff *skb, struct device *dev)
{
+ ((struct net_device_stats*)dev->priv)->tx_bytes += skb->len;
+ ((struct net_device_stats*)dev->priv)->tx_packets++;
ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
kfree_skb(skb);
return 0;
@@ -449,6 +451,9 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
struct igmpmsg *msg;
int ret;
+ if (mroute_socket==NULL)
+ return -EINVAL;
+
#ifdef CONFIG_IP_PIMSM
if (assert == IGMPMSG_WHOLEPKT)
skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
@@ -656,7 +661,10 @@ static void mrtsock_destruct(struct sock *sk)
{
if (sk == mroute_socket) {
ipv4_devconf.mc_forwarding = 0;
+
mroute_socket=NULL;
+ synchronize_bh();
+
mroute_close(sk);
}
}
@@ -1045,7 +1053,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c,
dev = rt->u.dst.dev;
- if (skb->len+encap > rt->u.dst.pmtu /* && (ntohs(iph->frag_off) & IP_DF) */) {
+ if (skb->len+encap > rt->u.dst.pmtu && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
allow to send ICMP, so that packets will disappear
to blackhole.
@@ -1119,7 +1127,10 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c,
* not mrouter) cannot join to more than one interface - it will
* result in receiving multiple packets.
*/
- skb2->dst->output(skb2);
+ if (skb2->len <= rt->u.dst.pmtu)
+ skb2->dst->output(skb2);
+ else
+ ip_fragment(skb2, skb2->dst->output);
}
int ipmr_find_vif(struct device *dev)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f8990903e..1640a0560 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -7,7 +7,7 @@
* PROC file system. It is mainly used for debugging and
* statistics.
*
- * Version: $Id: proc.c,v 1.33 1998/10/21 05:44:35 davem Exp $
+ * Version: $Id: proc.c,v 1.34 1999/02/08 11:20:34 davem Exp $
*
* Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
@@ -184,6 +184,8 @@ get__netinfo(struct proto *pro, char *buffer, int format, char **start, off_t of
for (req = sp->tp_pinfo.af_tcp.syn_wait_queue; req;
i++, req = req->dl_next) {
+ if (req->sk)
+ continue;
pos += 128;
if (pos < offset)
continue;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0079ed04d..dbde97b70 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -5,7 +5,7 @@
*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.61 1999/01/12 14:34:43 davem Exp $
+ * Version: $Id: route.c,v 1.67 1999/05/08 20:00:20 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -49,6 +49,9 @@
* Andi Kleen : Load-limit warning messages.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Vitaly E. Lavrov : Race condition in ip_route_input_slow.
+ * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
+ * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
+ * Marc Boucher : routing by fwmark
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -108,6 +111,7 @@ int ip_rt_redirect_silence = ((HZ/50) << (9+1));
int ip_rt_error_cost = HZ;
int ip_rt_error_burst = 5*HZ;
int ip_rt_gc_elasticity = 8;
+int ip_rt_mtu_expires = 10*60*HZ;
static unsigned long rt_deadline = 0;
@@ -165,13 +169,14 @@ __u8 ip_tos2prio[16] = {
TC_PRIO_FILLER
};
+
/*
* Route cache.
*/
struct rtable *rt_hash_table[RT_HASH_DIVISOR];
-static struct rtable * rt_intern_hash(unsigned hash, struct rtable * rth);
+static int rt_intern_hash(unsigned hash, struct rtable * rth, struct rtable ** res);
static __inline__ unsigned rt_hash_code(u32 daddr, u32 saddr, u8 tos)
{
@@ -249,6 +254,12 @@ static __inline__ void rt_free(struct rtable *rt)
dst_free(&rt->u.dst);
}
+static __inline__ void rt_drop(struct rtable *rt)
+{
+ ip_rt_put(rt);
+ dst_free(&rt->u.dst);
+}
+
static __inline__ int rt_fast_clean(struct rtable *rth)
{
/* Kill broadcast/multicast entries very aggresively, if they
@@ -257,6 +268,30 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
&& rth->key.iif && rth->u.rt_next);
}
+static __inline__ int rt_valuable(struct rtable *rth)
+{
+ return ((rth->rt_flags&(RTCF_REDIRECTED|RTCF_NOTIFY))
+ || rth->u.dst.expires);
+}
+
+static __inline__ int rt_may_expire(struct rtable *rth, int tmo1, int tmo2)
+{
+ int age;
+
+ if (atomic_read(&rth->u.dst.use))
+ return 0;
+
+ if (rth->u.dst.expires && (long)(rth->u.dst.expires - jiffies) <= 0)
+ return 1;
+
+ age = jiffies - rth->u.dst.lastuse;
+ if (age <= tmo1 && !rt_fast_clean(rth))
+ return 0;
+ if (age <= tmo2 && rt_valuable(rth))
+ return 0;
+ return 1;
+}
+
static void rt_check_expire(unsigned long dummy)
{
int i;
@@ -271,22 +306,27 @@ static void rt_check_expire(unsigned long dummy)
rthp = &rt_hash_table[rover];
while ((rth = *rthp) != NULL) {
- /*
- * Cleanup aged off entries.
- */
-
- if (!atomic_read(&rth->u.dst.use) &&
- (now - rth->u.dst.lastuse > tmo
- || rt_fast_clean(rth))) {
- *rthp = rth->u.rt_next;
- rt_free(rth);
+ if (rth->u.dst.expires) {
+			/* Entry is expired even if it is in use */
+ if ((long)(now - rth->u.dst.expires) <= 0) {
+ tmo >>= 1;
+ rthp = &rth->u.rt_next;
+ continue;
+ }
+ } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
+ tmo >>= 1;
+ rthp = &rth->u.rt_next;
continue;
}
- tmo >>= 1;
- rthp = &rth->u.rt_next;
+ /*
+ * Cleanup aged off entries.
+ */
+ *rthp = rth->u.rt_next;
+ rt_free(rth);
}
+ /* Fallback loop breaker. */
if ((jiffies - now) > 0)
break;
}
@@ -301,16 +341,21 @@ static void rt_run_flush(unsigned long dummy)
rt_deadline = 0;
+ start_bh_atomic();
for (i=0; i<RT_HASH_DIVISOR; i++) {
if ((rth = xchg(&rt_hash_table[i], NULL)) == NULL)
continue;
+ end_bh_atomic();
for (; rth; rth=next) {
next = rth->u.rt_next;
rth->u.rt_next = NULL;
rt_free(rth);
}
+
+ start_bh_atomic();
}
+ end_bh_atomic();
}
void rt_cache_flush(int delay)
@@ -354,60 +399,137 @@ void rt_cache_flush(int delay)
end_bh_atomic();
}
+/*
+ Short description of GC goals.
+
+   We want an algorithm which keeps the routing cache at an
+   equilibrium point, where the number of aged-off entries stays
+   approximately equal to the number of newly generated ones.
+
+   The current expiration strength is the variable "expire".
+   We try to adjust it dynamically, so that while the network is idle
+   expire is large enough to keep plenty of warm entries, and when
+   the load increases it shrinks to limit the cache size.
+ */
+
static int rt_garbage_collect(void)
{
- int i;
- static unsigned expire = RT_GC_TIMEOUT>>1;
+ static unsigned expire = RT_GC_TIMEOUT;
static unsigned long last_gc;
+ static int rover;
+ static int equilibrium;
struct rtable *rth, **rthp;
unsigned long now = jiffies;
-
- start_bh_atomic();
+ int goal;
/*
* Garbage collection is pretty expensive,
- * do not make it too frequently, but just increase expire strength.
+ * do not make it too frequently.
*/
- if (now - last_gc < ip_rt_gc_min_interval)
- goto out;
+ if (now - last_gc < ip_rt_gc_min_interval &&
+ atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
+ return 0;
- expire++;
+ /* Calculate number of entries, which we want to expire now. */
+ goal = atomic_read(&ipv4_dst_ops.entries) - RT_HASH_DIVISOR*ip_rt_gc_elasticity;
+ if (goal <= 0) {
+ if (equilibrium < ipv4_dst_ops.gc_thresh)
+ equilibrium = ipv4_dst_ops.gc_thresh;
+ goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
+ if (goal > 0) {
+ equilibrium += min(goal/2, RT_HASH_DIVISOR);
+ goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
+ }
+ } else {
+		/* We are in a dangerous area. Try to reduce the cache really
+ * aggressively.
+ */
+ goal = max(goal/2, RT_HASH_DIVISOR);
+ equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
+ }
- for (i=0; i<RT_HASH_DIVISOR; i++) {
- unsigned tmo;
- if (!rt_hash_table[i])
- continue;
- tmo = expire;
- for (rthp=&rt_hash_table[i]; (rth=*rthp); rthp=&rth->u.rt_next) {
- if (atomic_read(&rth->u.dst.use) ||
- (now - rth->u.dst.lastuse < tmo && !rt_fast_clean(rth))) {
- tmo >>= 1;
- continue;
+ if (now - last_gc >= ip_rt_gc_min_interval)
+ last_gc = now;
+
+ if (goal <= 0) {
+ equilibrium += goal;
+ goto work_done;
+ }
+
+ do {
+ int i, k;
+
+ start_bh_atomic();
+ for (i=0, k=rover; i<RT_HASH_DIVISOR; i++) {
+ unsigned tmo = expire;
+
+ k = (k + 1) & (RT_HASH_DIVISOR-1);
+ rthp = &rt_hash_table[k];
+ while ((rth = *rthp) != NULL) {
+ if (!rt_may_expire(rth, tmo, expire)) {
+ tmo >>= 1;
+ rthp = &rth->u.rt_next;
+ continue;
+ }
+ *rthp = rth->u.rt_next;
+ rth->u.rt_next = NULL;
+ rt_free(rth);
+ goal--;
}
- *rthp = rth->u.rt_next;
- rth->u.rt_next = NULL;
- rt_free(rth);
- break;
+ if (goal <= 0)
+ break;
}
- if ((jiffies-now)>0)
+ rover = k;
+ end_bh_atomic();
+
+ if (goal <= 0)
+ goto work_done;
+
+		/* Goal is not achieved. We stop the process if:
+
+		   - expire has been reduced to zero; otherwise expire is halved.
+		   - the table is not full.
+		   - we are called from interrupt context.
+		   - the jiffies check is just a fallback/debug loop breaker.
+		   We will not spin here for a long time in any case.
+ */
+
+ if (expire == 0)
break;
- }
- last_gc = now;
- if (atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
- expire = ip_rt_gc_timeout>>1;
+ expire >>= 1;
+#if RT_CACHE_DEBUG >= 2
+ printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire, atomic_read(&ipv4_dst_ops.entries), goal, i);
+#endif
+
+ if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
+ return 0;
+ } while (!in_interrupt() && jiffies - now < 1);
-out:
- expire -= expire>>ip_rt_gc_elasticity;
- end_bh_atomic();
- return (atomic_read(&ipv4_dst_ops.entries) > ip_rt_max_size);
+ if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
+ return 0;
+ if (net_ratelimit())
+ printk("dst cache overflow\n");
+ return 1;
+
+work_done:
+ expire += ip_rt_gc_min_interval;
+ if (expire > ip_rt_gc_timeout ||
+ atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
+ expire = ip_rt_gc_timeout;
+#if RT_CACHE_DEBUG >= 2
+ printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire, atomic_read(&ipv4_dst_ops.entries), goal, rover);
+#endif
+ return 0;
}
-static struct rtable *rt_intern_hash(unsigned hash, struct rtable * rt)
+static int rt_intern_hash(unsigned hash, struct rtable * rt, struct rtable ** rp)
{
struct rtable *rth, **rthp;
unsigned long now = jiffies;
+ int attempts = !in_interrupt();
+restart:
start_bh_atomic();
rthp = &rt_hash_table[hash];
@@ -424,9 +546,9 @@ static struct rtable *rt_intern_hash(unsigned hash, struct rtable * rt)
rth->u.dst.lastuse = now;
end_bh_atomic();
- ip_rt_put(rt);
- rt_free(rt);
- return rth;
+ rt_drop(rt);
+ *rp = rth;
+ return 0;
}
rthp = &rth->u.rt_next;
@@ -435,8 +557,31 @@ static struct rtable *rt_intern_hash(unsigned hash, struct rtable * rt)
/* Try to bind route to arp only if it is output
route or unicast forwarding path.
*/
- if (rt->rt_type == RTN_UNICAST || rt->key.iif == 0)
- arp_bind_neighbour(&rt->u.dst);
+ if (rt->rt_type == RTN_UNICAST || rt->key.iif == 0) {
+ if (!arp_bind_neighbour(&rt->u.dst)) {
+ end_bh_atomic();
+
+ /* Neighbour tables are full and nothing
+ can be released. Try to shrink the route cache;
+ it most likely holds some neighbour records.
+ */
+ if (attempts-- > 0) {
+ int saved_elasticity = ip_rt_gc_elasticity;
+ int saved_int = ip_rt_gc_min_interval;
+ ip_rt_gc_elasticity = 1;
+ ip_rt_gc_min_interval = 0;
+ rt_garbage_collect();
+ ip_rt_gc_min_interval = saved_int;
+ ip_rt_gc_elasticity = saved_elasticity;
+ goto restart;
+ }
+
+ rt_drop(rt);
+ if (net_ratelimit())
+ printk("neighbour table overflow\n");
+ return -ENOBUFS;
+ }
+ }
rt->u.rt_next = rt_hash_table[hash];
#if RT_CACHE_DEBUG >= 2
@@ -449,9 +594,9 @@ static struct rtable *rt_intern_hash(unsigned hash, struct rtable * rt)
}
#endif
rt_hash_table[hash] = rt;
-
end_bh_atomic();
- return rt;
+ *rp = rt;
+ return 0;
}
void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
@@ -537,17 +682,15 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
!(rt->u.dst.neighbour->nud_state&NUD_VALID)) {
if (rt->u.dst.neighbour)
neigh_event_send(rt->u.dst.neighbour, NULL);
- ip_rt_put(rt);
ip_rt_put(rth);
- rt_free(rt);
+ rt_drop(rt);
break;
}
*rthp = rth->u.rt_next;
- rt = rt_intern_hash(hash, rt);
- ip_rt_put(rt);
- ip_rt_put(rth);
- rt_free(rth);
+ if (!rt_intern_hash(hash, rt, &rt))
+ ip_rt_put(rt);
+ rt_drop(rth);
break;
}
}
@@ -573,14 +716,14 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
ip_rt_put(rt);
return NULL;
}
- if (rt->rt_flags&RTCF_REDIRECTED) {
+ if ((rt->rt_flags&RTCF_REDIRECTED) || rt->u.dst.expires) {
unsigned hash = rt_hash_code(rt->key.dst, rt->key.src^(rt->key.oif<<5), rt->key.tos);
struct rtable **rthp;
#if RT_CACHE_DEBUG >= 1
printk(KERN_DEBUG "ip_rt_advice: redirect to %d.%d.%d.%d/%02x dropped\n", NIPQUAD(rt->rt_dst), rt->key.tos);
#endif
- ip_rt_put(rt);
start_bh_atomic();
+ ip_rt_put(rt);
for (rthp = &rt_hash_table[hash]; *rthp; rthp = &(*rthp)->u.rt_next) {
if (*rthp == rt) {
*rthp = rt->u.rt_next;
@@ -614,6 +757,10 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
void ip_rt_send_redirect(struct sk_buff *skb)
{
struct rtable *rt = (struct rtable*)skb->dst;
+ struct in_device *in_dev = (struct in_device*)rt->u.dst.dev->ip_ptr;
+
+ if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev))
+ return;
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
@@ -637,7 +784,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
rt->u.dst.rate_last = jiffies;
++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (skb->dev->ip_ptr && IN_DEV_LOG_MARTIANS((struct in_device*)skb->dev->ip_ptr) &&
+ if (IN_DEV_LOG_MARTIANS(in_dev) &&
rt->u.dst.rate_tokens == ip_rt_redirect_number && net_ratelimit())
printk(KERN_WARNING "host %08x/if%d ignores redirects for %08x to %08x.\n",
rt->rt_src, rt->rt_iif, rt->rt_dst, rt->rt_gateway);
@@ -737,13 +884,23 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
if (mtu < rth->u.dst.pmtu) {
dst_confirm(&rth->u.dst);
rth->u.dst.pmtu = mtu;
+ dst_set_expires(&rth->u.dst, ip_rt_mtu_expires);
}
est_mtu = mtu;
}
}
}
}
- return est_mtu;
+ return est_mtu ? : new_mtu;
+}
+
+void ip_rt_update_pmtu(struct dst_entry *dst, unsigned mtu)
+{
+ if (dst->pmtu > mtu && mtu >= 68 &&
+ !(dst->mxlock&(1<<RTAX_MTU))) {
+ dst->pmtu = mtu;
+ dst_set_expires(dst, ip_rt_mtu_expires);
+ }
}
static struct dst_entry * ipv4_dst_check(struct dst_entry * dst, u32 cookie)
@@ -760,7 +917,13 @@ static struct dst_entry * ipv4_dst_reroute(struct dst_entry * dst,
static void ipv4_link_failure(struct sk_buff *skb)
{
+ struct rtable *rt;
+
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+
+ rt = (struct rtable *) skb->dst;
+ if (rt)
+ dst_set_expires(&rt->u.dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -794,7 +957,17 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
memcpy(addr, &src, 4);
}
-static void rt_set_nexthop(struct rtable *rt, struct fib_result *res)
+#ifdef CONFIG_NET_CLS_ROUTE
+static void set_class_tag(struct rtable *rt, u32 tag)
+{
+ if (!(rt->u.dst.tclassid&0xFFFF))
+ rt->u.dst.tclassid |= tag&0xFFFF;
+ if (!(rt->u.dst.tclassid&0xFFFF0000))
+ rt->u.dst.tclassid |= tag&0xFFFF0000;
+}
+#endif
+
+static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
struct fib_info *fi = res->fi;
@@ -824,9 +997,11 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res)
rt->u.dst.window= 0;
rt->u.dst.rtt = TCP_TIMEOUT_INIT;
}
-#if defined(CONFIG_NET_CLS_ROUTE) && defined(CONFIG_IP_MULTIPLE_TABLES)
- if (rt->u.dst.tclassid == 0)
- rt->u.dst.tclassid = fib_rules_tclass(res);
+#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+ set_class_tag(rt, fib_rules_tclass(res));
+#endif
+ set_class_tag(rt, itag);
#endif
rt->rt_type = res->type;
}
@@ -839,6 +1014,7 @@ ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
struct rtable *rth;
u32 spec_dst;
struct in_device *in_dev = dev->ip_ptr;
+ u32 itag = 0;
/* Primary sanity checks. */
@@ -850,7 +1026,7 @@ ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
if (!LOCAL_MCAST(daddr))
return -EINVAL;
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
- } else if (fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst) < 0)
+ } else if (fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, &itag) < 0)
return -EINVAL;
rth = dst_alloc(sizeof(struct rtable), &ipv4_dst_ops);
@@ -863,12 +1039,18 @@ ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->key.dst = daddr;
rth->rt_dst = daddr;
rth->key.tos = tos;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ rth->key.fwmark = skb->fwmark;
+#endif
rth->key.src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_IP_ROUTE_NAT
rth->rt_dst_map = daddr;
rth->rt_src_map = saddr;
#endif
+#ifdef CONFIG_NET_CLS_ROUTE
+ rth->u.dst.tclassid = itag;
+#endif
rth->rt_iif =
rth->key.iif = dev->ifindex;
rth->u.dst.dev = &loopback_dev;
@@ -888,8 +1070,7 @@ ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
#endif
hash = rt_hash_code(daddr, saddr^(dev->ifindex<<5), tos);
- skb->dst = (struct dst_entry*)rt_intern_hash(hash, rth);
- return 0;
+ return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
}
/*
@@ -910,6 +1091,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
struct in_device *in_dev = dev->ip_ptr;
struct in_device *out_dev;
unsigned flags = 0;
+ u32 itag = 0;
struct rtable * rth;
unsigned hash;
u32 spec_dst;
@@ -925,6 +1107,9 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
key.dst = daddr;
key.src = saddr;
key.tos = tos;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ key.fwmark = skb->fwmark;
+#endif
key.iif = dev->ifindex;
key.oif = 0;
key.scope = RT_SCOPE_UNIVERSE;
@@ -983,9 +1168,14 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
goto brd_input;
if (res.type == RTN_LOCAL) {
- spec_dst = daddr;
- if (inet_addr_type(saddr) != RTN_UNICAST)
+ int result;
+ result = fib_validate_source(saddr, daddr, tos, loopback_dev.ifindex,
+ dev, &spec_dst, &itag);
+ if (result < 0)
goto martian_source;
+ if (result)
+ flags |= RTCF_DIRECTSRC;
+ spec_dst = daddr;
goto local_input;
}
@@ -1005,14 +1195,14 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
return -EINVAL;
}
- err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(res), dev, &spec_dst);
+ err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(res), dev, &spec_dst, &itag);
if (err < 0)
goto martian_source;
if (err)
flags |= RTCF_DIRECTSRC;
- if (out_dev == in_dev && err && !(flags&RTCF_NAT) &&
+ if (out_dev == in_dev && err && !(flags&(RTCF_NAT|RTCF_MASQ)) &&
(IN_DEV_SHARED_MEDIA(out_dev)
|| inet_addr_onlink(out_dev, saddr, FIB_RES_GW(res))))
flags |= RTCF_DOREDIRECT;
@@ -1033,6 +1223,9 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->key.dst = daddr;
rth->rt_dst = daddr;
rth->key.tos = tos;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ rth->key.fwmark = skb->fwmark;
+#endif
rth->key.src = saddr;
rth->rt_src = saddr;
rth->rt_gateway = daddr;
@@ -1051,7 +1244,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->u.dst.input = ip_forward;
rth->u.dst.output = ip_output;
- rt_set_nexthop(rth, &res);
+ rt_set_nexthop(rth, &res, itag);
rth->rt_flags = flags;
@@ -1066,8 +1259,7 @@ int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
}
#endif
- skb->dst = (struct dst_entry*)rt_intern_hash(hash, rth);
- return 0;
+ return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
brd_input:
if (skb->protocol != __constant_htons(ETH_P_IP))
@@ -1076,7 +1268,7 @@ brd_input:
if (ZERONET(saddr)) {
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
} else {
- err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst);
+ err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, &itag);
if (err < 0)
goto martian_source;
if (err)
@@ -1096,12 +1288,18 @@ local_input:
rth->key.dst = daddr;
rth->rt_dst = daddr;
rth->key.tos = tos;
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ rth->key.fwmark = skb->fwmark;
+#endif
rth->key.src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_IP_ROUTE_NAT
rth->rt_dst_map = key.dst;
rth->rt_src_map = key.src;
#endif
+#ifdef CONFIG_NET_CLS_ROUTE
+ rth->u.dst.tclassid = itag;
+#endif
rth->rt_iif =
rth->key.iif = dev->ifindex;
rth->u.dst.dev = &loopback_dev;
@@ -1116,8 +1314,7 @@ local_input:
rth->rt_flags &= ~RTCF_LOCAL;
}
rth->rt_type = res.type;
- skb->dst = (struct dst_entry*)rt_intern_hash(hash, rth);
- return 0;
+ return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
no_route:
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -1170,6 +1367,9 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
rth->key.src == saddr &&
rth->key.iif == iif &&
rth->key.oif == 0 &&
+#ifdef CONFIG_IP_ROUTE_FWMARK
+ rth->key.fwmark == skb->fwmark &&
+#endif
rth->key.tos == tos) {
rth->u.dst.lastuse = jiffies;
atomic_inc(&rth->u.dst.use);
@@ -1292,13 +1492,16 @@ int ip_route_output_slow(struct rtable **rp, u32 daddr, u32 saddr, u32 tos, int
return -ENODEV; /* Wrong error code */
if (LOCAL_MCAST(daddr) || daddr == 0xFFFFFFFF) {
- key.src = inet_select_addr(dev_out, 0, RT_SCOPE_LINK);
+ if (!key.src)
+ key.src = inet_select_addr(dev_out, 0, RT_SCOPE_LINK);
goto make_route;
}
- if (MULTICAST(daddr))
- key.src = inet_select_addr(dev_out, 0, key.scope);
- else if (!daddr)
- key.src = inet_select_addr(dev_out, 0, RT_SCOPE_HOST);
+ if (!key.src) {
+ if (MULTICAST(daddr))
+ key.src = inet_select_addr(dev_out, 0, key.scope);
+ else if (!daddr)
+ key.src = inet_select_addr(dev_out, 0, RT_SCOPE_HOST);
+ }
}
if (!key.dst) {
@@ -1344,43 +1547,33 @@ int ip_route_output_slow(struct rtable **rp, u32 daddr, u32 saddr, u32 tos, int
if (res.type == RTN_NAT)
return -EINVAL;
-
- if (!key.src) {
- key.src = FIB_RES_PREFSRC(res);
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- /*
- * "Stabilization" of route.
- * This step is necessary, if locally originated packets
- * are subjected to policy routing, otherwise we could get
- * route flapping.
- */
- if (fib_lookup(&key, &res))
- return -ENETUNREACH;
-#endif
+ if (res.type == RTN_LOCAL) {
+ if (!key.src)
+ key.src = key.dst;
+ dev_out = &loopback_dev;
+ key.oif = dev_out->ifindex;
+ res.fi = NULL;
+ flags |= RTCF_LOCAL;
+ goto make_route;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res.fi->fib_nhs > 1 && key.oif == 0)
fib_select_multipath(&key, &res);
+ else
#endif
+ if (res.prefixlen==0 && res.type == RTN_UNICAST && key.oif == 0)
+ fib_select_default(&key, &res);
- dev_out = FIB_RES_DEV(res);
-
- if (res.type == RTN_LOCAL) {
- dev_out = &loopback_dev;
- key.oif = dev_out->ifindex;
- res.fi = NULL;
- flags |= RTCF_LOCAL;
- }
+ if (!key.src)
+ key.src = FIB_RES_PREFSRC(res);
+ dev_out = FIB_RES_DEV(res);
key.oif = dev_out->ifindex;
make_route:
- if (LOOPBACK(key.src) && !(dev_out->flags&IFF_LOOPBACK)) {
- printk(KERN_DEBUG "this guy talks to %08x from loopback\n", key.dst);
+ if (LOOPBACK(key.src) && !(dev_out->flags&IFF_LOOPBACK))
return -EINVAL;
- }
if (key.dst == 0xFFFFFFFF)
res.type = RTN_BROADCAST;
@@ -1449,13 +1642,12 @@ make_route:
#endif
}
- rt_set_nexthop(rth, &res);
+ rt_set_nexthop(rth, &res, 0);
rth->rt_flags = flags;
hash = rt_hash_code(daddr, saddr^(oif<<5), tos);
- *rp = rt_intern_hash(hash, rth);
- return 0;
+ return rt_intern_hash(hash, rth, rp);
}
int ip_route_output(struct rtable **rp, u32 daddr, u32 saddr, u32 tos, int oif)
@@ -1507,7 +1699,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int no
nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r));
r = NLMSG_DATA(nlh);
- nlh->nlmsg_flags = nowait ? NLM_F_MULTI : 0;
+ nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
@@ -1517,6 +1709,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int no
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
r->rtm_flags = (rt->rt_flags&~0xFFFF) | RTM_F_CLONED;
+ if (rt->rt_flags & RTCF_NOTIFY)
+ r->rtm_flags |= RTM_F_NOTIFY;
RTA_PUT(skb, RTA_DST, 4, &rt->rt_dst);
if (rt->key.src) {
r->rtm_src_len = 32;
@@ -1524,6 +1718,10 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int no
}
if (rt->u.dst.dev)
RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
+#ifdef CONFIG_NET_CLS_ROUTE
+ if (rt->u.dst.tclassid)
+ RTA_PUT(skb, RTA_FLOW, 4, &rt->u.dst.tclassid);
+#endif
if (rt->key.iif)
RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_spec_dst);
else if (rt->rt_src != rt->key.src)
@@ -1546,7 +1744,10 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int no
ci.rta_lastuse = jiffies - rt->u.dst.lastuse;
ci.rta_used = atomic_read(&rt->u.dst.refcnt);
ci.rta_clntref = atomic_read(&rt->u.dst.use);
- ci.rta_expires = 0;
+ if (rt->u.dst.expires)
+ ci.rta_expires = rt->u.dst.expires - jiffies;
+ else
+ ci.rta_expires = 0;
ci.rta_error = rt->u.dst.error;
#ifdef CONFIG_IP_MROUTE
eptr = (struct rtattr*)skb->tail;
@@ -1625,7 +1826,7 @@ int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
end_bh_atomic();
rt = (struct rtable*)skb->dst;
if (!err && rt->u.dst.error)
- err = rt->u.dst.error;
+ err = -rt->u.dst.error;
} else {
int oif = 0;
if (rta[RTA_OIF-1])
@@ -1667,7 +1868,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
for (h=0; h < RT_HASH_DIVISOR; h++) {
if (h < s_h) continue;
if (h > s_h)
- memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(int));
+ s_idx = 0;
start_bh_atomic();
for (rt = rt_hash_table[h], idx = 0; rt; rt = rt->u.rt_next, idx++) {
if (idx < s_idx)
@@ -1717,7 +1918,7 @@ int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
ctl_table ipv4_route_table[] = {
{NET_IPV4_ROUTE_FLUSH, "flush",
- &flush_delay, sizeof(int), 0644, NULL,
+ &flush_delay, sizeof(int), 0200, NULL,
&ipv4_sysctl_rtcache_flush},
{NET_IPV4_ROUTE_MIN_DELAY, "min_delay",
&ip_rt_min_delay, sizeof(int), 0644, NULL,
@@ -1758,12 +1959,45 @@ ctl_table ipv4_route_table[] = {
{NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity",
&ip_rt_gc_elasticity, sizeof(int), 0644, NULL,
&proc_dointvec},
+ {NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires",
+ &ip_rt_mtu_expires, sizeof(int), 0644, NULL,
+ &proc_dointvec_jiffies},
{0}
};
#endif
+#ifdef CONFIG_NET_CLS_ROUTE
+struct ip_rt_acct ip_rt_acct[256];
+
+#ifdef CONFIG_PROC_FS
+static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ *start=buffer;
+
+ if (offset + length > sizeof(ip_rt_acct)) {
+ length = sizeof(ip_rt_acct) - offset;
+ *eof = 1;
+ }
+ if (length > 0) {
+ start_bh_atomic();
+ memcpy(buffer, ((u8*)&ip_rt_acct)+offset, length);
+ end_bh_atomic();
+ return length;
+ }
+ return 0;
+}
+#endif
+#endif
+
+
__initfunc(void ip_rt_init(void))
{
+#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_NET_CLS_ROUTE
+ struct proc_dir_entry *ent;
+#endif
+#endif
devinet_init();
ip_fib_init();
rt_periodic_timer.function = rt_check_expire;
@@ -1781,5 +2015,9 @@ __initfunc(void ip_rt_init(void))
0, &proc_net_inode_operations,
rt_cache_get_info
});
+#ifdef CONFIG_NET_CLS_ROUTE
+ ent = create_proc_entry("net/rt_acct", 0, 0);
+ ent->read_proc = ip_rt_acct_read;
+#endif
#endif
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index c3e219d46..655176432 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * $Id: syncookies.c,v 1.6 1998/06/10 07:29:22 davem Exp $
+ * $Id: syncookies.c,v 1.7 1999/03/17 02:34:57 davem Exp $
*
* Missing: IPv6 support.
*/
@@ -147,6 +147,8 @@ cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt)
req->af.v4_req.rmt_addr = skb->nh.iph->saddr;
req->class = &or_ipv4; /* for safety */
+ req->af.v4_req.opt = NULL;
+
/* We threw the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
*/
@@ -162,7 +164,6 @@ cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt)
}
}
- req->af.v4_req.opt = NULL;
req->snd_wscale = req->rcv_wscale = req->tstamp_ok = 0;
req->wscale_ok = 0;
req->expires = 0UL;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 67e482e86..8c1c9f9be 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.134 1999/01/09 08:50:09 davem Exp $
+ * Version: $Id: tcp.c,v 1.140 1999/04/22 10:34:31 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -735,18 +735,25 @@ static void wait_for_tcp_memory(struct sock * sk)
* Note: must be called with the socket locked.
*/
-int tcp_do_sendmsg(struct sock *sk, int iovlen, struct iovec *iov, int flags)
+int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
{
- struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
- int mss_now;
- int err = 0;
- int copied = 0;
+ struct iovec *iov;
+ struct tcp_opt *tp;
struct sk_buff *skb;
+ int iovlen, flags;
+ int mss_now;
+ int err, copied;
+
+ lock_sock(sk);
+
+ err = 0;
+ tp = &(sk->tp_pinfo.af_tcp);
/* Wait for a connection to finish. */
+ flags = msg->msg_flags;
if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if((err = wait_for_tcp_connect(sk, flags)) != 0)
- return err;
+ goto out;
/* This should be in poll */
sk->socket->flags &= ~SO_NOSPACE; /* clear SIGIO XXX */
@@ -754,6 +761,10 @@ int tcp_do_sendmsg(struct sock *sk, int iovlen, struct iovec *iov, int flags)
mss_now = tcp_current_mss(sk);
/* Ok commence sending. */
+ iovlen = msg->msg_iovlen;
+ iov = msg->msg_iov;
+ copied = 0;
+
while(--iovlen >= 0) {
int seglen=iov->iov_len;
unsigned char * from=iov->iov_base;
@@ -761,7 +772,7 @@ int tcp_do_sendmsg(struct sock *sk, int iovlen, struct iovec *iov, int flags)
iov++;
while(seglen > 0) {
- int copy, tmp, queue_it;
+ int copy, tmp, queue_it, psh;
if (err)
goto do_fault2;
@@ -843,11 +854,14 @@ int tcp_do_sendmsg(struct sock *sk, int iovlen, struct iovec *iov, int flags)
* being outside the window, it will be queued
* for later rather than sent.
*/
+ psh = 0;
copy = tp->snd_wnd - (tp->snd_nxt - tp->snd_una);
- if(copy >= (tp->max_window >> 1))
+ if(copy > (tp->max_window >> 1)) {
copy = min(copy, mss_now);
- else
+ psh = 1;
+ } else {
copy = mss_now;
+ }
if(copy > seglen)
copy = seglen;
@@ -895,7 +909,7 @@ int tcp_do_sendmsg(struct sock *sk, int iovlen, struct iovec *iov, int flags)
/* Prepare control bits for TCP header creation engine. */
TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK |
- (PSH_NEEDED ?
+ ((PSH_NEEDED || psh) ?
TCPCB_FLAG_PSH : 0));
TCP_SKB_CB(skb)->sacked = 0;
if (flags & MSG_OOB) {
@@ -926,26 +940,36 @@ int tcp_do_sendmsg(struct sock *sk, int iovlen, struct iovec *iov, int flags)
}
}
sk->err = 0;
- return copied;
+ err = copied;
+ goto out;
do_sock_err:
if(copied)
- return copied;
- return sock_error(sk);
+ err = copied;
+ else
+ err = sock_error(sk);
+ goto out;
do_shutdown:
if(copied)
- return copied;
- if (!(flags&MSG_NOSIGNAL))
- send_sig(SIGPIPE, current, 0);
- return -EPIPE;
+ err = copied;
+ else {
+ if (!(flags&MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
+ err = -EPIPE;
+ }
+ goto out;
do_interrupted:
if(copied)
- return copied;
- return err;
+ err = copied;
+ goto out;
do_fault:
kfree_skb(skb);
do_fault2:
- return -EFAULT;
+ err = -EFAULT;
+out:
+ tcp_push_pending_frames(sk, tp);
+ release_sock(sk);
+ return err;
}
#undef PSH_NEEDED
@@ -1070,6 +1094,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
if(copied > 0) {
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
__u32 rcv_window_now = tcp_receive_window(tp);
+ __u32 new_window = __tcp_select_window(sk);
/* We won't be raising the window any further than
* the window-clamp allows. Our window selection
@@ -1077,7 +1102,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
* checks are necessary to prevent spurious ACKs
* which don't advertize a larger window.
*/
- if((copied >= rcv_window_now) &&
+ if((new_window && (new_window >= rcv_window_now * 2)) &&
((rcv_window_now + tp->mss_cache) <= tp->window_clamp))
tcp_read_wakeup(sk);
}
@@ -1394,9 +1419,6 @@ void tcp_shutdown(struct sock *sk, int how)
(TCPF_ESTABLISHED|TCPF_SYN_SENT|TCPF_SYN_RECV|TCPF_CLOSE_WAIT)) {
lock_sock(sk);
- /* Flag that the sender has shutdown. */
- sk->shutdown |= SEND_SHUTDOWN;
-
/* Clear out any half completed packets. FIN if needed. */
if (tcp_close_state(sk,0))
tcp_send_fin(sk);
@@ -1683,13 +1705,9 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
} else {
sk->nonagle = 0;
- if (tp->send_head) {
- lock_sock(sk);
- if (tp->send_head &&
- tcp_snd_test (sk, tp->send_head))
- tcp_write_xmit(sk);
- release_sock(sk);
- }
+ lock_sock(sk);
+ tcp_push_pending_frames(sk, tp);
+ release_sock(sk);
}
return 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index aca7026b9..4a607a749 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.153 1999/01/20 07:20:03 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.164 1999/05/08 21:09:52 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -97,7 +97,7 @@ static int prune_queue(struct sock *sk);
static void tcp_delack_estimator(struct tcp_opt *tp)
{
if(tp->ato == 0) {
- tp->lrcvtime = jiffies;
+ tp->lrcvtime = tcp_time_stamp;
/* Help sender leave slow start quickly,
* and also makes sure we do not take this
@@ -106,9 +106,9 @@ static void tcp_delack_estimator(struct tcp_opt *tp)
tp->ato = 1;
tcp_enter_quickack_mode(tp);
} else {
- int m = jiffies - tp->lrcvtime;
+ int m = tcp_time_stamp - tp->lrcvtime;
- tp->lrcvtime = jiffies;
+ tp->lrcvtime = tcp_time_stamp;
if(m <= 0)
m = 1;
if(m > tp->rto)
@@ -130,11 +130,15 @@ static __inline__ void tcp_remember_ack(struct tcp_opt *tp, struct tcphdr *th,
{
tp->delayed_acks++;
- /* Tiny-grams with PSH set make us ACK quickly.
- * Note: This also clears the "quick ack mode" bit.
+ /* Tiny-grams with PSH set artificially deflate our
+ * ato measurement, but with a lower bound.
*/
- if(th->psh && (skb->len < (tp->mss_cache >> 1)))
- tp->ato = HZ/50;
+ if(th->psh && (skb->len < (tp->mss_cache >> 1))) {
+ /* Preserve the quickack state. */
+ if((tp->ato & 0x7fffffff) > HZ/50)
+ tp->ato = ((tp->ato & 0x80000000) |
+ (HZ/50));
+ }
}
/* Called to compute a smoothed rtt estimate. The data fed to this
@@ -227,7 +231,7 @@ extern __inline__ void tcp_replace_ts_recent(struct sock *sk, struct tcp_opt *tp
*/
if((s32)(tp->rcv_tsval - tp->ts_recent) >= 0) {
tp->ts_recent = tp->rcv_tsval;
- tp->ts_recent_stamp = jiffies;
+ tp->ts_recent_stamp = tcp_time_stamp;
}
}
}
@@ -237,8 +241,8 @@ extern __inline__ void tcp_replace_ts_recent(struct sock *sk, struct tcp_opt *tp
extern __inline__ int tcp_paws_discard(struct tcp_opt *tp, struct tcphdr *th, unsigned len)
{
/* ts_recent must be younger than 24 days */
- return (((jiffies - tp->ts_recent_stamp) >= PAWS_24DAYS) ||
- (((s32)(tp->rcv_tsval-tp->ts_recent) < 0) &&
+ return (((s32)(tcp_time_stamp - tp->ts_recent_stamp) >= PAWS_24DAYS) ||
+ (((s32)(tp->rcv_tsval - tp->ts_recent) < 0) &&
/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM */
(len != (th->doff * 4))));
}
@@ -341,6 +345,7 @@ void tcp_parse_options(struct sock *sk, struct tcphdr *th, struct tcp_opt *tp, i
{
unsigned char *ptr;
int length=(th->doff*4)-sizeof(struct tcphdr);
+ int saw_mss = 0;
ptr = (unsigned char *)(th + 1);
tp->saw_tstamp = 0;
@@ -369,6 +374,7 @@ void tcp_parse_options(struct sock *sk, struct tcphdr *th, struct tcp_opt *tp, i
in_mss = 536;
if (tp->mss_clamp > in_mss)
tp->mss_clamp = in_mss;
+ saw_mss = 1;
}
break;
case TCPOPT_WINDOW:
@@ -422,6 +428,8 @@ void tcp_parse_options(struct sock *sk, struct tcphdr *th, struct tcp_opt *tp, i
length-=opsize;
};
}
+ if(th->syn && saw_mss == 0)
+ tp->mss_clamp = 536;
}
/* Fast parse options. This hopes to only see timestamps.
@@ -489,8 +497,7 @@ static void tcp_fast_retrans(struct sock *sk, u32 ack, int not_dup)
if (tp->high_seq == 0 || after(ack, tp->high_seq)) {
tp->dup_acks++;
if ((tp->fackets_out > 3) || (tp->dup_acks == 3)) {
- tp->snd_ssthresh =
- max(min(tp->snd_wnd, tp->snd_cwnd) >> 1, 2);
+ tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
tp->snd_cwnd = (tp->snd_ssthresh + 3);
tp->high_seq = tp->snd_nxt;
if(!tp->fackets_out)
@@ -601,7 +608,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __u32 ack,
{
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
struct sk_buff *skb;
- unsigned long now = jiffies;
+ __u32 now = tcp_time_stamp;
int acked = 0;
/* If we are retransmitting, and this ACK clears up to
@@ -658,7 +665,7 @@ static void tcp_ack_probe(struct sock *sk, __u32 ack)
tp->probes_out = 0;
/* Was it a usable window open? */
-
+
/* should always be non-null */
if (tp->send_head != NULL &&
!before (ack + tp->snd_wnd, TCP_SKB_CB(tp->send_head)->end_seq)) {
@@ -717,7 +724,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, struct tcp_opt *tp,
if (!(flag & FLAG_DATA_ACKED))
return;
- seq_rtt = jiffies-tp->rcv_tsecr;
+ seq_rtt = tcp_time_stamp - tp->rcv_tsecr;
tcp_rtt_estimator(tp, seq_rtt);
if (tp->retransmits) {
if (tp->packets_out == 0) {
@@ -741,7 +748,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, struct tcp_opt *tp,
static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
{
struct sk_buff *skb = skb_peek(&sk->write_queue);
- long when = tp->rto - (jiffies - TCP_SKB_CB(skb)->when);
+ __u32 when = tp->rto - (tcp_time_stamp - TCP_SKB_CB(skb)->when);
/* Some data was ACK'd, if still retransmitting (due to a
* timeout), resend more of the retransmit queue. The
@@ -770,7 +777,7 @@ static int tcp_ack(struct sock *sk, struct tcphdr *th,
if (tp->pending == TIME_KEEPOPEN)
tp->probes_out = 0;
- tp->rcv_tstamp = jiffies;
+ tp->rcv_tstamp = tcp_time_stamp;
/* If the ack is newer than sent or older than previous acks
* then we can probably ignore it.
@@ -945,7 +952,9 @@ int tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
tcp_tw_deschedule(tw);
tcp_timewait_kill(tw);
sk = af_specific->get_sock(skb, th);
- if(sk == NULL || !ipsec_sk_policy(sk,skb))
+ if(sk == NULL ||
+ !ipsec_sk_policy(sk,skb) ||
+ atomic_read(&sk->sock_readers) != 0)
return 0;
skb_set_owner_r(skb, sk);
af_specific = sk->tp_pinfo.af_tcp.af_specific;
@@ -2102,7 +2111,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->tcp_header_len = sizeof(struct tcphdr);
if (tp->saw_tstamp) {
tp->ts_recent = tp->rcv_tsval;
- tp->ts_recent_stamp = jiffies;
+ tp->ts_recent_stamp = tcp_time_stamp;
}
/* Can't be earlier, doff would be wrong. */
@@ -2126,7 +2135,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tcp_parse_options(sk, th, tp, 0);
if (tp->saw_tstamp) {
tp->ts_recent = tp->rcv_tsval;
- tp->ts_recent_stamp = jiffies;
+ tp->ts_recent_stamp = tcp_time_stamp;
}
tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
@@ -2179,8 +2188,22 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
}
}
+ /* The silly FIN test here is necessary to see an advancing ACK in
+ * retransmitted FIN frames properly. Consider the following sequence:
+ *
+ * host1 --> host2 FIN XSEQ:XSEQ(0) ack YSEQ
+ * host2 --> host1 FIN YSEQ:YSEQ(0) ack XSEQ
+ * host1 --> host2 XSEQ:XSEQ(0) ack YSEQ+1
+ * host2 --> host1 FIN YSEQ:YSEQ(0) ack XSEQ+1 (fails tcp_sequence test)
+ *
+ * At this point the connection will deadlock, with host1 believing
+ * that its FIN is never ACK'd, and thus it will retransmit its FIN
+ * forever. The following fix is from Taral (taral@taral.net).
+ */
+
/* step 1: check sequence number */
- if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
+ if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq) &&
+ !(th->fin && TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)) {
if (!th->rst) {
tcp_send_ack(sk);
goto discard;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 660e64c44..b5070c3a7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_ipv4.c,v 1.164 1999/01/04 20:36:55 davem Exp $
+ * Version: $Id: tcp_ipv4.c,v 1.175 1999/05/08 21:09:54 davem Exp $
*
* IPv4 specific functions
*
@@ -629,6 +629,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (!tcp_v4_unique_address(sk)) {
kfree_skb(buff);
+ sk->daddr = 0;
return -EADDRNOTAVAIL;
}
@@ -657,7 +658,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
static int tcp_v4_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
- struct tcp_opt *tp;
int retval = -EINVAL;
/* Do sanity checking for sendmsg/sendto/send. */
@@ -679,15 +679,7 @@ static int tcp_v4_sendmsg(struct sock *sk, struct msghdr *msg, int len)
if (addr->sin_addr.s_addr != sk->daddr)
goto out;
}
-
- lock_sock(sk);
- retval = tcp_do_sendmsg(sk, msg->msg_iovlen, msg->msg_iov,
- msg->msg_flags);
- /* Push out partial tail frames if needed. */
- tp = &(sk->tp_pinfo.af_tcp);
- if(tp->send_head && tcp_snd_test(sk, tp->send_head))
- tcp_write_xmit(sk);
- release_sock(sk);
+ retval = tcp_do_sendmsg(sk, msg);
out:
return retval;
@@ -731,10 +723,13 @@ static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
/*
* This routine does path mtu discovery as defined in RFC1191.
*/
-static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *ip)
+static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *ip, unsigned mtu)
{
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+ if (atomic_read(&sk->sock_readers))
+ return;
+
/* We're not interested in TCP_LISTEN and open_requests (SYN-ACKs
* sent out by Linux are always <576 bytes so they should go through
* unfragmented).
@@ -748,21 +743,20 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *ip)
* There is a small race when the user changes this flag in the
* route, but I think that's acceptable.
*/
- if (sk->ip_pmtudisc != IP_PMTUDISC_DONT && sk->dst_cache) {
- if (tp->pmtu_cookie > sk->dst_cache->pmtu &&
- !atomic_read(&sk->sock_readers)) {
- lock_sock(sk);
- tcp_sync_mss(sk, sk->dst_cache->pmtu);
-
- /* Resend the TCP packet because it's
- * clear that the old packet has been
- * dropped. This is the new "fast" path mtu
- * discovery.
- */
- tcp_simple_retransmit(sk);
- release_sock(sk);
- } /* else let the usual retransmit timer handle it */
- }
+ if (sk->dst_cache == NULL)
+ return;
+ ip_rt_update_pmtu(sk->dst_cache, mtu);
+ if (sk->ip_pmtudisc != IP_PMTUDISC_DONT &&
+ tp->pmtu_cookie > sk->dst_cache->pmtu) {
+ tcp_sync_mss(sk, sk->dst_cache->pmtu);
+
+ /* Resend the TCP packet because it's
+ * clear that the old packet has been
+ * dropped. This is the new "fast" path mtu
+ * discovery.
+ */
+ tcp_simple_retransmit(sk);
+ } /* else let the usual retransmit timer handle it */
}
/*
@@ -789,6 +783,11 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
struct tcp_opt *tp;
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
+#if ICMP_MIN_LENGTH < 14
+ int no_flags = 0;
+#else
+#define no_flags 0
+#endif
struct sock *sk;
__u32 seq;
int err;
@@ -797,6 +796,10 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
icmp_statistics.IcmpInErrors++;
return;
}
+#if ICMP_MIN_LENGTH < 14
+ if (len < (iph->ihl << 2) + 14)
+ no_flags = 1;
+#endif
th = (struct tcphdr*)(dp+(iph->ihl<<2));
@@ -816,7 +819,7 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
switch (type) {
case ICMP_SOURCE_QUENCH:
#ifndef OLD_SOURCE_QUENCH /* This is deprecated */
- tp->snd_ssthresh = max(tp->snd_cwnd >> 1, 2);
+ tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
tp->snd_cwnd = tp->snd_ssthresh;
tp->snd_cwnd_cnt = 0;
tp->high_seq = tp->snd_nxt;
@@ -830,7 +833,7 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
return;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
- do_pmtu_discovery(sk, iph);
+ do_pmtu_discovery(sk, iph, ntohs(skb->h.icmph->un.frag.mtu));
return;
}
@@ -863,7 +866,7 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
* ACK should set the opening flag, but that is too
* complicated right now.
*/
- if (!th->syn && !th->ack)
+ if (!no_flags && !th->syn && !th->ack)
return;
req = tcp_v4_search_req(tp, iph, th, &prev);
@@ -898,7 +901,7 @@ void tcp_v4_err(struct sk_buff *skb, unsigned char *dp, int len)
break;
case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen */
- if (!th->syn)
+ if (!no_flags && !th->syn)
return;
tcp_statistics.TcpAttemptFails++;
sk->err = err;
@@ -1305,6 +1308,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
if(newsk != NULL) {
struct tcp_opt *newtp;
+#ifdef CONFIG_FILTER
+ struct sk_filter *filter;
+#endif
memcpy(newsk, sk, sizeof(*newsk));
newsk->sklist_next = NULL;
@@ -1325,6 +1331,10 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newsk->pair = NULL;
skb_queue_head_init(&newsk->back_log);
skb_queue_head_init(&newsk->error_queue);
+#ifdef CONFIG_FILTER
+ if ((filter = newsk->filter) != NULL)
+ sk_filter_charge(newsk, filter);
+#endif
/* Now setup tcp_opt */
newtp = &(newsk->tp_pinfo.af_tcp);
@@ -1348,7 +1358,14 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
newtp->last_ack_sent = req->rcv_isn + 1;
newtp->backoff = 0;
newtp->mdev = TCP_TIMEOUT_INIT;
- newtp->snd_cwnd = 1;
+
+ /* So many TCP implementations out there (incorrectly) count the
+ * initial SYN frame in their delayed-ACK and congestion control
+ * algorithms that we must have the following bandaid to talk
+ * efficiently to them. -DaveM
+ */
+ newtp->snd_cwnd = 2;
+
newtp->rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
newtp->fackets_out = 0;
@@ -1413,7 +1430,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
}
if (newtp->tstamp_ok) {
newtp->ts_recent = req->ts_recent;
- newtp->ts_recent_stamp = jiffies;
+ newtp->ts_recent_stamp = tcp_time_stamp;
newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
} else {
newtp->tcp_header_len = sizeof(struct tcphdr);
@@ -1556,19 +1573,11 @@ static inline struct sock *tcp_v4_hnd_req(struct sock *sk,struct sk_buff *skb)
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_FILTER
- if (sk->filter)
- {
- if (sk_filter(skb, sk->filter_data, sk->filter))
- goto discard;
- }
+ struct sk_filter *filter = sk->filter;
+ if (filter && sk_filter(skb, filter))
+ goto discard;
#endif /* CONFIG_FILTER */
- /*
- * socket locking is here for SMP purposes as backlog rcv
- * is currently called with bh processing disabled.
- */
- lock_sock(sk);
-
/*
* This doesn't check if the socket has enough room for the packet.
* Either process the packet _without_ queueing it and then free it,
@@ -1579,7 +1588,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->state == TCP_ESTABLISHED) { /* Fast path */
if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
goto reset;
- release_sock(sk);
return 0;
}
@@ -1590,14 +1598,22 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
nsk = tcp_v4_hnd_req(sk, skb);
if (!nsk)
goto discard;
- lock_sock(nsk);
- release_sock(sk);
+
+ /*
+ * Queue it on the new socket if the new socket is active,
+ * otherwise we just short-circuit this and continue with
+ * the new socket.
+ */
+ if (atomic_read(&nsk->sock_readers)) {
+ skb_orphan(skb);
+ __skb_queue_tail(&nsk->back_log, skb);
+ return 0;
+ }
sk = nsk;
}
if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
goto reset;
- release_sock(sk);
return 0;
reset:
@@ -1609,7 +1625,6 @@ discard:
* might be destroyed here. This current version compiles correctly,
* but you have been warned.
*/
- release_sock(sk);
return 0;
}
@@ -1831,10 +1846,16 @@ static int tcp_v4_init_sock(struct sock *sk)
tp->mdev = TCP_TIMEOUT_INIT;
tp->mss_clamp = ~0;
+ /* So many TCP implementations out there (incorrectly) count the
+ * initial SYN frame in their delayed-ACK and congestion control
+ * algorithms that we must have the following bandaid to talk
+ * efficiently to them. -DaveM
+ */
+ tp->snd_cwnd = 2;
+
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
- tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
tp->snd_ssthresh = 0x7fffffff; /* Infinity */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3e99d80db..9a096f0f3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_output.c,v 1.101 1999/01/20 07:20:14 davem Exp $
+ * Version: $Id: tcp_output.c,v 1.108 1999/05/08 21:48:59 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -30,6 +30,7 @@
* David S. Miller : Charge memory using the right skb
* during syn/ack processing.
* David S. Miller : Output engine completely rewritten.
+ * Andrea Arcangeli: SYNACK carry ts_recent in tsecr.
*
*/
@@ -135,7 +136,8 @@ void tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
(sysctl_flags & SYSCTL_FLAG_SACK),
(sysctl_flags & SYSCTL_FLAG_WSCALE),
tp->rcv_wscale,
- TCP_SKB_CB(skb)->when);
+ TCP_SKB_CB(skb)->when,
+ tp->ts_recent);
} else {
tcp_build_and_update_options((__u32 *)(th + 1),
tp, TCP_SKB_CB(skb)->when);
@@ -165,7 +167,7 @@ void tcp_send_skb(struct sock *sk, struct sk_buff *skb, int force_queue)
if (!force_queue && tp->send_head == NULL && tcp_snd_test(sk, skb)) {
/* Send it out now. */
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
@@ -231,8 +233,9 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
buff->csum = csum_partial_copy(skb->data + len, skb_put(buff, nsize),
nsize, 0);
- TCP_SKB_CB(skb)->end_seq -= nsize;
- skb_trim(skb, skb->len - nsize);
+ /* This takes care of the FIN sequence number too. */
+ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
+ skb_trim(skb, len);
/* Rechecksum original buffer. */
skb->csum = csum_partial(skb->data, skb->len, 0);
@@ -341,7 +344,7 @@ void tcp_write_xmit(struct sock *sk)
/* Advance the send_head. This one is going out. */
update_send_head(sk);
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
@@ -518,24 +521,39 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
void tcp_simple_retransmit(struct sock *sk)
{
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
- struct sk_buff *skb;
- unsigned int mss = tcp_current_mss(sk);
+ struct sk_buff *skb, *old_next_skb;
+ unsigned int mss = tcp_current_mss(sk);
/* Don't muck with the congestion window here. */
tp->dup_acks = 0;
tp->high_seq = tp->snd_nxt;
- tp->retrans_head = NULL;
+ tp->retrans_head = NULL;
/* Input control flow will see that this was retransmitted
* and not use it for RTT calculation in the absence of
* the timestamp option.
*/
- for (skb = skb_peek(&sk->write_queue);
+ for (old_next_skb = skb = skb_peek(&sk->write_queue);
((skb != tp->send_head) &&
(skb != (struct sk_buff *)&sk->write_queue));
- skb = skb->next)
- if (skb->len > mss)
- tcp_retransmit_skb(sk, skb);
+ skb = skb->next) {
+ int resend_skb = 0;
+
+ /* Our goal is to push out the packets which we
+ * have already sent, but which are now being chopped
+ * up to account for the PMTU information we have.
+ *
+ * As we resend the queue, packets are fragmented
+ * into two pieces, and when we try to send the
+ * second piece it may be collapsed together with
+ * a subsequent packet, and so on. -DaveM
+ */
+ if (old_next_skb != skb || skb->len > mss)
+ resend_skb = 1;
+ old_next_skb = skb->next;
+ if (resend_skb != 0)
+ tcp_retransmit_skb(sk, skb);
+ }
}
static __inline__ void update_retrans_head(struct sock *sk)
@@ -578,6 +596,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if(tp->af_specific->rebuild_header(sk))
return 1; /* Routing failure or similar. */
+ /* Some Solaris stacks overoptimize and ignore the FIN on a
+ * retransmit when old data is attached. So strip it off
+ * since it is cheap to do so and saves bytes on the network.
+ */
+ if(skb->len > 0 &&
+ (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+ tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
+ TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
+ skb_trim(skb, 0);
+ skb->csum = 0;
+ }
+
/* Ok, we're gonna send it out, update state. */
TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_RETRANS;
tp->retrans_out++;
@@ -585,11 +615,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
/* Make a copy, if the first transmission SKB clone we made
* is still in somebody's hands, else make a clone.
*/
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
if(skb_cloned(skb))
skb = skb_copy(skb, GFP_ATOMIC);
else
skb = skb_clone(skb, GFP_ATOMIC);
+
tcp_transmit_skb(sk, skb);
/* Update global TCP statistics and return success. */
@@ -707,7 +738,7 @@ void tcp_send_fin(struct sock *sk)
tp->packets_out &&
!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG)) {
update_send_head(sk);
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
@@ -762,7 +793,7 @@ void tcp_send_active_reset(struct sock *sk)
/* Send it off. */
TCP_SKB_CB(skb)->seq = tp->write_seq;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tcp_transmit_skb(sk, skb);
}
@@ -792,7 +823,7 @@ int tcp_send_synack(struct sock *sk)
TCP_SKB_CB(skb)->seq = tp->snd_una;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
__skb_queue_tail(&sk->write_queue, skb);
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
return 0;
@@ -859,10 +890,11 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(req->rcv_wnd);
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tcp_syn_build_options((__u32 *)(th + 1), req->mss, req->tstamp_ok,
req->sack_ok, req->wscale_ok, req->rcv_wscale,
- TCP_SKB_CB(skb)->when);
+ TCP_SKB_CB(skb)->when,
+ req->ts_recent);
skb->csum = 0;
th->doff = (tcp_header_size >> 2);
@@ -946,7 +978,7 @@ void tcp_connect(struct sock *sk, struct sk_buff *buff, int mtu)
/* Send it off. */
__skb_queue_tail(&sk->write_queue, buff);
- TCP_SKB_CB(buff)->when = jiffies;
+ TCP_SKB_CB(buff)->when = tcp_time_stamp;
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
tcp_statistics.TcpActiveOpens++;
@@ -977,7 +1009,7 @@ void tcp_send_delayed_ack(struct tcp_opt *tp, int max_timeout)
tp->delack_timer.expires = timeout;
add_timer(&tp->delack_timer);
} else {
- if (timeout < tp->delack_timer.expires)
+ if (time_before(timeout, tp->delack_timer.expires))
mod_timer(&tp->delack_timer, timeout);
}
}
@@ -1020,7 +1052,7 @@ void tcp_send_ack(struct sock *sk)
/* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tp->snd_nxt;
- TCP_SKB_CB(buff)->when = jiffies;
+ TCP_SKB_CB(buff)->when = tcp_time_stamp;
tcp_transmit_skb(sk, buff);
}
}
@@ -1058,7 +1090,7 @@ void tcp_write_wakeup(struct sock *sk)
return; /* Let a retransmit get it. */
}
update_send_head(sk);
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tp->packets_out++;
tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
@@ -1084,7 +1116,7 @@ void tcp_write_wakeup(struct sock *sk)
*/
TCP_SKB_CB(skb)->seq = tp->snd_nxt - 1;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
- TCP_SKB_CB(skb)->when = jiffies;
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
tcp_transmit_skb(sk, skb);
}
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 41e54309c..ad6ccace9 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_timer.c,v 1.57 1999/01/20 07:20:21 davem Exp $
+ * Version: $Id: tcp_timer.c,v 1.62 1999/05/08 21:09:55 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -224,7 +224,7 @@ static __inline__ int tcp_keepopen_proc(struct sock *sk)
if ((1<<sk->state) & (TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT2)) {
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
- __u32 elapsed = jiffies - tp->rcv_tstamp;
+ __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
if (elapsed >= sysctl_tcp_keepalive_time) {
if (tp->probes_out > sysctl_tcp_keepalive_probes) {
@@ -317,48 +317,47 @@ static void tcp_twkill(unsigned long data)
void tcp_tw_schedule(struct tcp_tw_bucket *tw)
{
int slot = (tcp_tw_death_row_slot - 1) & (TCP_TWKILL_SLOTS - 1);
+ struct tcp_tw_bucket **tpp = &tcp_tw_death_row[slot];
+
+ if((tw->next_death = *tpp) != NULL)
+ (*tpp)->pprev_death = &tw->next_death;
+ *tpp = tw;
+ tw->pprev_death = tpp;
tw->death_slot = slot;
- tw->next_death = tcp_tw_death_row[slot];
- tcp_tw_death_row[slot] = tw;
+
tcp_inc_slow_timer(TCP_SLT_TWKILL);
}
/* Happens rarely if at all, no care about scalability here. */
void tcp_tw_reschedule(struct tcp_tw_bucket *tw)
{
- struct tcp_tw_bucket *walk;
- int slot = tw->death_slot;
+ struct tcp_tw_bucket **tpp;
+ int slot;
+
+ if(tw->next_death)
+ tw->next_death->pprev_death = tw->pprev_death;
+ *tw->pprev_death = tw->next_death;
+ tw->pprev_death = NULL;
- walk = tcp_tw_death_row[slot];
- if(walk == tw) {
- tcp_tw_death_row[slot] = tw->next_death;
- } else {
- while(walk->next_death != tw)
- walk = walk->next_death;
- walk->next_death = tw->next_death;
- }
slot = (tcp_tw_death_row_slot - 1) & (TCP_TWKILL_SLOTS - 1);
+ tpp = &tcp_tw_death_row[slot];
+ if((tw->next_death = *tpp) != NULL)
+ (*tpp)->pprev_death = &tw->next_death;
+ *tpp = tw;
+ tw->pprev_death = tpp;
+
tw->death_slot = slot;
- tw->next_death = tcp_tw_death_row[slot];
- tcp_tw_death_row[slot] = tw;
/* Timer was incremented when we first entered the table. */
}
/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
- struct tcp_tw_bucket *walk;
- int slot = tw->death_slot;
-
- walk = tcp_tw_death_row[slot];
- if(walk == tw) {
- tcp_tw_death_row[slot] = tw->next_death;
- } else {
- while(walk->next_death != tw)
- walk = walk->next_death;
- walk->next_death = tw->next_death;
- }
+ if(tw->next_death)
+ tw->next_death->pprev_death = tw->pprev_death;
+ *tw->pprev_death = tw->next_death;
+ tw->pprev_death = NULL;
tcp_dec_slow_timer(TCP_SLT_TWKILL);
}
@@ -403,7 +402,7 @@ static void tcp_keepalive(unsigned long data)
for(i = chain_start; i < (chain_start + ((TCP_HTABLE_SIZE/2) >> 2)); i++) {
struct sock *sk = tcp_established_hash[i];
while(sk) {
- if(sk->keepopen) {
+ if(!atomic_read(&sk->sock_readers) && sk->keepopen) {
count += tcp_keepopen_proc(sk);
if(count == sysctl_tcp_max_ka_probes)
goto out;
@@ -445,7 +444,6 @@ void tcp_retransmit_timer(unsigned long data)
tcp_reset_xmit_timer(sk, TIME_RETRANS, HZ/20);
return;
}
- lock_sock(sk);
/* Clear delay ack timer. */
tcp_clear_xmit_timer(sk, TIME_DACK);
@@ -479,7 +477,7 @@ void tcp_retransmit_timer(unsigned long data)
* means it must be an accurate representation of our current
* sending rate _and_ the snd_wnd.
*/
- tp->snd_ssthresh = max(min(tp->snd_wnd, tp->snd_cwnd) >> 1, 2);
+ tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd = 1;
}
@@ -510,8 +508,6 @@ void tcp_retransmit_timer(unsigned long data)
tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
tcp_write_timeout(sk);
-
- release_sock(sk);
}
/*
@@ -564,7 +560,7 @@ static void tcp_syn_recv_timer(unsigned long data)
if (!tp->syn_wait_queue)
break;
} else {
- __u32 timeo;
+ unsigned long timeo;
struct open_request *op;
(*conn->class->rtx_syn_ack)(sk, conn);
diff --git a/net/ipv4/timer.c b/net/ipv4/timer.c
index df3c9cce5..3821a7c4c 100644
--- a/net/ipv4/timer.c
+++ b/net/ipv4/timer.c
@@ -5,7 +5,7 @@
*
* TIMER - implementation of software timers for IP.
*
- * Version: $Id: timer.c,v 1.14 1998/11/07 11:55:43 davem Exp $
+ * Version: $Id: timer.c,v 1.15 1999/02/22 13:54:29 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -75,8 +75,7 @@ void net_timer (unsigned long data)
/* Only process if socket is not in use. */
if (atomic_read(&sk->sock_readers)) {
/* Try again later. */
- sk->timer.expires = jiffies+HZ/20;
- add_timer(&sk->timer);
+ mod_timer(&sk->timer, jiffies+HZ/20);
return;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 113b06ef8..5fcec9cf3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -5,7 +5,7 @@
*
* The User Datagram Protocol (UDP).
*
- * Version: $Id: udp.c,v 1.64 1998/11/08 11:17:07 davem Exp $
+ * Version: $Id: udp.c,v 1.66 1999/05/08 20:00:25 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -570,7 +570,6 @@ struct udpfakehdr
struct udphdr uh;
u32 saddr;
u32 daddr;
- u32 other;
struct iovec *iov;
u32 wcheck;
};
@@ -778,7 +777,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
ufh.daddr = ipc.addr = rt->rt_dst;
ufh.uh.len = htons(ulen);
ufh.uh.check = 0;
- ufh.other = (htons(ulen) << 16) + IPPROTO_UDP*256;
ufh.iov = msg->msg_iov;
ufh.wcheck = 0;
@@ -846,7 +844,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
return(0);
}
-#if defined(CONFIG_FILTER) || !defined(HAVE_CSUM_COPY_USER)
+#ifndef HAVE_CSUM_COPY_USER
#undef CONFIG_UDP_DELAY_CSUM
#endif
@@ -890,11 +888,11 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
#else
- if (sk->no_check || skb->ip_summed==CHECKSUM_UNNECESSARY) {
+ if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
} else if (copied > msg->msg_iov[0].iov_len || (msg->msg_flags&MSG_TRUNC)) {
- if (csum_fold(csum_partial(skb->h.raw, ntohs(skb->h.uh->len), skb->csum)))
+ if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum)))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
@@ -907,7 +905,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
copied, csum, &err);
if (err)
goto out_free;
- if (csum_fold(csum))
+ if ((unsigned short)csum_fold(csum))
goto csum_copy_err;
}
#endif
@@ -957,7 +955,7 @@ csum_copy_err:
* Error for blocking case is chosen to masquerade
* as some normal condition.
*/
- return (msg->msg_flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
+ return (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
#endif
}
@@ -1030,6 +1028,19 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
* Charge it to the socket, dropping if the queue is full.
*/
+#if defined(CONFIG_FILTER) && defined(CONFIG_UDP_DELAY_CSUM)
+ if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
+ udp_statistics.UdpInErrors++;
+ ip_statistics.IpInDiscards++;
+ ip_statistics.IpInDelivers--;
+ kfree_skb(skb);
+ return -1;
+ }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+#endif
+
if (sock_queue_rcv_skb(sk,skb)<0) {
udp_statistics.UdpInErrors++;
ip_statistics.IpInDiscards++;
@@ -1179,7 +1190,7 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
if (sk == NULL) {
#ifdef CONFIG_UDP_DELAY_CSUM
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
- csum_fold(csum_partial((char*)uh, ulen, skb->csum)))
+ (unsigned short)csum_fold(csum_partial((char*)uh, ulen, skb->csum)))
goto csum_error;
#endif
udp_statistics.UdpNoPorts++;
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 15ef93ac6..e64706e83 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -11,7 +11,8 @@ O_TARGET := ipv6.o
IPV6_OBJS := af_inet6.o ip6_output.o ip6_input.o addrconf.o sit.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
- exthdrs.o sysctl_net_ipv6.o datagram.o proc.o
+ exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
+ ip6_flowlabel.o
MOD_LIST_NAME := IPV6_MODULES
M_OBJS := $(O_TARGET)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 362b606cf..f34975076 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: addrconf.c,v 1.46 1999/01/12 14:34:47 davem Exp $
+ * $Id: addrconf.c,v 1.48 1999/03/25 10:04:43 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -88,6 +88,34 @@ static struct timer_list addr_chk_timer = {
0, 0, addrconf_verify
};
+/* These locks protect only against address deletions,
+ but not against address adds or status updates.
+ This is OK. The only race is when an address is selected
+ and then becomes invalid immediately after selection.
+ It is harmless, because this address could already have been
+ invalid several usecs earlier.
+
+ It is important that:
+
+ 1. The result of inet6_add_addr() is used only inside the lock
+ or from a bh_atomic context.
+
+ 2. inet6_get_lladdr() is used only from a bh-protected context.
+
+ 3. The result of ipv6_chk_addr() is not used outside of a bh-protected context.
+ */
+
+static __inline__ void addrconf_lock(void)
+{
+ atomic_inc(&addr_list_lock);
+ synchronize_bh();
+}
+
+static __inline__ void addrconf_unlock(void)
+{
+ atomic_dec(&addr_list_lock);
+}
+
static int addrconf_ifdown(struct device *dev, int how);
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
@@ -188,7 +216,7 @@ static struct inet6_dev * ipv6_add_dev(struct device *dev)
if (dev->mtu < IPV6_MIN_MTU)
return NULL;
- ndev = kmalloc(sizeof(struct inet6_dev), gfp_any());
+ ndev = kmalloc(sizeof(struct inet6_dev), GFP_KERNEL);
if (ndev) {
memset(ndev, 0, sizeof(struct inet6_dev));
@@ -227,9 +255,9 @@ static struct inet6_dev * ipv6_find_idev(struct device *dev)
idev = ipv6_add_dev(dev);
if (idev == NULL)
return NULL;
+ if (dev->flags&IFF_UP)
+ ipv6_mc_up(idev);
}
- if (dev->flags&IFF_UP)
- ipv6_mc_up(idev);
return idev;
}
@@ -260,13 +288,13 @@ struct inet6_dev * ipv6_get_idev(struct device *dev)
return NULL;
}
-struct inet6_ifaddr * ipv6_add_addr(struct inet6_dev *idev,
- struct in6_addr *addr, int scope)
+static struct inet6_ifaddr *
+ipv6_add_addr(struct inet6_dev *idev, struct in6_addr *addr, int scope)
{
struct inet6_ifaddr *ifa;
int hash;
- ifa = kmalloc(sizeof(struct inet6_ifaddr), gfp_any());
+ ifa = kmalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
if (ifa == NULL) {
ADBG(("ipv6_add_addr: malloc failed\n"));
@@ -313,6 +341,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
for (; iter; iter = iter->lst_next) {
if (iter == ifp) {
*back = ifp->lst_next;
+ synchronize_bh();
+
ifp->lst_next = NULL;
break;
}
@@ -325,6 +355,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
for (; iter; iter = iter->if_next) {
if (iter == ifp) {
*back = ifp->if_next;
+ synchronize_bh();
+
ifp->if_next = NULL;
break;
}
@@ -343,24 +375,23 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
* ii) see if there is a specific route for the destination and use
* an address of the attached interface
* iii) don't use deprecated addresses
- *
- * at the moment I believe only iii) is missing.
*/
-struct inet6_ifaddr * ipv6_get_saddr(struct dst_entry *dst,
- struct in6_addr *daddr)
+int ipv6_get_saddr(struct dst_entry *dst,
+ struct in6_addr *daddr, struct in6_addr *saddr)
{
int scope;
struct inet6_ifaddr *ifp = NULL;
struct inet6_ifaddr *match = NULL;
struct device *dev = NULL;
struct rt6_info *rt;
+ int err;
int i;
rt = (struct rt6_info *) dst;
if (rt)
dev = rt->rt6i_dev;
- atomic_inc(&addr_list_lock);
+ addrconf_lock();
scope = ipv6_addr_scope(daddr);
if (rt && (rt->rt6i_flags & RTF_ALLONLINK)) {
@@ -388,10 +419,10 @@ struct inet6_ifaddr * ipv6_get_saddr(struct dst_entry *dst,
if (idev->dev == dev) {
for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
if (ifp->scope == scope) {
- if (!(ifp->flags & ADDR_STATUS))
+ if (!(ifp->flags & (ADDR_STATUS|DAD_STATUS)))
goto out;
- if (!(ifp->flags & ADDR_INVALID))
+ if (!(ifp->flags & (ADDR_INVALID|DAD_STATUS)))
match = ifp;
}
}
@@ -410,10 +441,10 @@ struct inet6_ifaddr * ipv6_get_saddr(struct dst_entry *dst,
for (i=0; i < IN6_ADDR_HSIZE; i++) {
for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) {
if (ifp->scope == scope) {
- if (!(ifp->flags & ADDR_STATUS))
+ if (!(ifp->flags & (ADDR_STATUS|DAD_STATUS)))
goto out;
- if (!(ifp->flags & ADDR_INVALID))
+ if (!(ifp->flags & (ADDR_INVALID|DAD_STATUS)))
match = ifp;
}
}
@@ -422,28 +453,30 @@ struct inet6_ifaddr * ipv6_get_saddr(struct dst_entry *dst,
out:
if (ifp == NULL)
ifp = match;
- atomic_dec(&addr_list_lock);
- return ifp;
+
+ err = -ENETUNREACH;
+ if (ifp) {
+ memcpy(saddr, &ifp->addr, sizeof(struct in6_addr));
+ err = 0;
+ }
+ addrconf_unlock();
+ return err;
}
struct inet6_ifaddr * ipv6_get_lladdr(struct device *dev)
{
- struct inet6_ifaddr *ifp;
+ struct inet6_ifaddr *ifp = NULL;
struct inet6_dev *idev;
- int hash;
- hash = ipv6_devindex_hash(dev->ifindex);
-
- for (idev = inet6_dev_lst[hash]; idev; idev=idev->next) {
- if (idev->dev == dev) {
- for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
- if (ifp->scope == IFA_LINK)
- return ifp;
- }
- break;
+ if ((idev = ipv6_get_idev(dev)) != NULL) {
+ addrconf_lock();
+ for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
+ if (ifp->scope == IFA_LINK)
+ break;
}
+ addrconf_unlock();
}
- return NULL;
+ return ifp;
}
/*
@@ -461,7 +494,7 @@ struct inet6_ifaddr * ipv6_chk_addr(struct in6_addr *addr, struct device *dev, i
if (!nd)
flags |= DAD_STATUS|ADDR_INVALID;
- atomic_inc(&addr_list_lock);
+ addrconf_lock();
hash = ipv6_addr_hash(addr);
for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
@@ -472,7 +505,7 @@ struct inet6_ifaddr * ipv6_chk_addr(struct in6_addr *addr, struct device *dev, i
}
}
- atomic_dec(&addr_list_lock);
+ addrconf_unlock();
return ifp;
}
@@ -665,13 +698,6 @@ void addrconf_prefix_rcv(struct device *dev, u8 *opt, int len)
}
/*
- * If we where using an "all destinations on link" route
- * delete it
- */
-
- rt6_purge_dflt_routers(RTF_ALLONLINK);
-
- /*
* Two things going on here:
* 1) Add routes for on-link prefixes
* 2) Configure prefixes with the auto flag set
@@ -845,14 +871,17 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen)
scope = ipv6_addr_scope(pfx);
- if ((ifp = ipv6_add_addr(idev, pfx, scope)) == NULL)
- return -ENOMEM;
-
- ifp->prefix_len = plen;
- ifp->flags |= ADDR_PERMANENT;
+ addrconf_lock();
+ if ((ifp = ipv6_add_addr(idev, pfx, scope)) != NULL) {
+ ifp->prefix_len = plen;
+ ifp->flags |= ADDR_PERMANENT;
+ addrconf_dad_start(ifp);
+ addrconf_unlock();
+ return 0;
+ }
+ addrconf_unlock();
- addrconf_dad_start(ifp);
- return 0;
+ return -ENOBUFS;
}
static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen)
@@ -870,20 +899,22 @@ static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen)
scope = ipv6_addr_scope(pfx);
+ start_bh_atomic();
for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) {
if (ifp->scope == scope && ifp->prefix_len == plen &&
(!memcmp(pfx, &ifp->addr, sizeof(struct in6_addr)))) {
ipv6_del_addr(ifp);
+ end_bh_atomic();
/* If the last address is deleted administratively,
disable IPv6 on this interface.
*/
-
if (idev->addr_list == NULL)
addrconf_ifdown(idev->dev, 1);
return 0;
}
}
+ end_bh_atomic();
return -EADDRNOTAVAIL;
}
@@ -940,12 +971,14 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
}
if (addr.s6_addr32[3]) {
+ addrconf_lock();
ifp = ipv6_add_addr(idev, &addr, scope);
if (ifp) {
ifp->flags |= ADDR_PERMANENT;
ifp->prefix_len = 128;
ipv6_ifa_notify(RTM_NEWADDR, ifp);
}
+ addrconf_unlock();
return;
}
@@ -967,17 +1000,17 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
flag |= IFA_HOST;
}
+ addrconf_lock();
ifp = ipv6_add_addr(idev, &addr, flag);
-
- if (ifp == NULL)
- continue;
-
- if (idev->dev->flags&IFF_POINTOPOINT)
- ifp->prefix_len = 10;
- else
- ifp->prefix_len = 96;
- ifp->flags |= ADDR_PERMANENT;
- ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ if (ifp) {
+ if (idev->dev->flags&IFF_POINTOPOINT)
+ ifp->prefix_len = 10;
+ else
+ ifp->prefix_len = 96;
+ ifp->flags |= ADDR_PERMANENT;
+ ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ }
+ addrconf_unlock();
}
}
}
@@ -999,31 +1032,29 @@ static void init_loopback(struct device *dev)
return;
}
+ addrconf_lock();
ifp = ipv6_add_addr(idev, &addr, IFA_HOST);
- if (ifp == NULL) {
- printk(KERN_DEBUG "init_loopback: add_addr failed\n");
- return;
+ if (ifp) {
+ ifp->flags |= ADDR_PERMANENT;
+ ifp->prefix_len = 128;
+ ipv6_ifa_notify(RTM_NEWADDR, ifp);
}
-
- ifp->flags |= ADDR_PERMANENT;
- ifp->prefix_len = 128;
-
- ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ addrconf_unlock();
}
static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr)
{
struct inet6_ifaddr * ifp;
+ addrconf_lock();
ifp = ipv6_add_addr(idev, addr, IFA_LINK);
- if (ifp == NULL)
- return;
-
- ifp->flags = ADDR_PERMANENT;
- ifp->prefix_len = 10;
-
- addrconf_dad_start(ifp);
+ if (ifp) {
+ ifp->flags = ADDR_PERMANENT;
+ ifp->prefix_len = 10;
+ addrconf_dad_start(ifp);
+ }
+ addrconf_unlock();
}
static void addrconf_dev_config(struct device *dev)
@@ -1375,8 +1406,12 @@ static int iface_proc_info(char *buffer, char **start, off_t offset,
struct inet6_ifaddr *ifp;
int i;
int len = 0;
+ off_t pos=0;
+ off_t begin=0;
+
+ addrconf_lock();
- for (i=0; i < IN6_ADDR_HSIZE; i++)
+ for (i=0; i < IN6_ADDR_HSIZE; i++) {
for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) {
int j;
@@ -1393,14 +1428,25 @@ static int iface_proc_info(char *buffer, char **start, off_t offset,
ifp->scope,
ifp->flags,
ifp->idev->dev->name);
+ pos=begin+len;
+ if(pos<offset) {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ goto done;
}
+ }
- *start = buffer + offset;
-
- len -= offset;
-
- if (len > length)
- len = length;
+done:
+ addrconf_unlock();
+
+ *start=buffer+(offset-begin);
+ len-=(offset-begin);
+ if(len>length)
+ len=length;
+ if(len<0)
+ len=0;
return len;
}
@@ -1423,6 +1469,12 @@ void addrconf_verify(unsigned long foo)
unsigned long now = jiffies;
int i;
+ if (atomic_read(&addr_list_lock)) {
+ addr_chk_timer.expires = jiffies + 1*HZ;
+ add_timer(&addr_chk_timer);
+ return;
+ }
+
for (i=0; i < IN6_ADDR_HSIZE; i++) {
for (ifp=inet6_addr_lst[i]; ifp;) {
if (ifp->flags & ADDR_INVALID) {
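
A pattern worth calling out, since it recurs throughout this merge (ipv6_del_addr above, and similar hunks in mcast.c, ipv6_sockglue.c and sit.c below): lists walked from bottom-half context are not locked for reading. A deletion first unlinks the element, then calls synchronize_bh() so that any walker which already fetched the old pointer can finish, and only afterwards clears the element's next pointer or frees it; addrconf_lock() itself only makes deletions wait, it does not stop adders or readers. The sketch below restates the idiom with a hypothetical item list (names invented for illustration, kernel context assumed).

/* Hedged sketch of the unlink idiom used above; "struct item" and the
 * list head are hypothetical, only the ordering matters: unlink, let
 * in-flight bottom halves drain, then free. */
struct item {
	struct item *next;
	/* ... payload ... */
};

static struct item *item_list;		/* traversed from bottom halves */

static void item_unlink_and_free(struct item *victim)
{
	struct item **back = &item_list;
	struct item *iter;

	for (; (iter = *back) != NULL; back = &iter->next) {
		if (iter == victim) {
			*back = victim->next;	/* new walkers no longer see it */
			synchronize_bh();	/* walkers already past *back
						 * finish before we touch it */
			victim->next = NULL;
			kfree(victim);
			break;
		}
	}
}
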
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7f11aa556..36ab229ed 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -7,7 +7,7 @@
*
* Adapted from linux/net/ipv4/af_inet.c
*
- * $Id: af_inet6.c,v 1.42 1999/01/19 08:20:06 davem Exp $
+ * $Id: af_inet6.c,v 1.43 1999/04/22 10:07:39 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -190,7 +190,7 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
__u32 v4addr = 0;
- unsigned short snum = 0;
+ unsigned short snum;
int addr_type = 0;
/* If the socket has its own bind function then use it. */
@@ -203,12 +203,6 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
(sk->num != 0))
return -EINVAL;
- snum = ntohs(addr->sin6_port);
- if (snum == 0)
- snum = sk->prot->good_socknum();
- if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
- return(-EACCES);
-
addr_type = ipv6_addr_type(&addr->sin6_addr);
if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
return(-EINVAL);
@@ -241,6 +235,12 @@ static int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
memcpy(&sk->net_pinfo.af_inet6.saddr, &addr->sin6_addr,
sizeof(struct in6_addr));
+ snum = ntohs(addr->sin6_port);
+ if (snum == 0)
+ snum = sk->prot->good_socknum();
+ if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+ return(-EACCES);
+
/* Make sure we are allowed to bind here. */
if(sk->prot->verify_bind(sk, snum))
return -EADDRINUSE;
@@ -292,6 +292,9 @@ int inet6_destroy_sock(struct sock *sk)
if ((skb = xchg(&sk->net_pinfo.af_inet6.pktoptions, NULL)) != NULL)
kfree_skb(skb);
+ /* Free flowlabels */
+ fl6_free_socklist(sk);
+
/* Free tx options */
if ((opt = xchg(&sk->net_pinfo.af_inet6.opt, NULL)) != NULL)
@@ -311,6 +314,7 @@ static int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk;
sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
sk = sock->sk;
if (peer) {
if (!tcp_connected(sk->state))
@@ -318,6 +322,8 @@ static int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
sin->sin6_port = sk->dport;
memcpy(&sin->sin6_addr, &sk->net_pinfo.af_inet6.daddr,
sizeof(struct in6_addr));
+ if (sk->net_pinfo.af_inet6.sndflow)
+ sin->sin6_flowinfo = sk->net_pinfo.af_inet6.flow_label;
} else {
if (ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr) == IPV6_ADDR_ANY)
memcpy(&sin->sin6_addr,
@@ -537,6 +543,7 @@ __initfunc(void inet6_proto_init(struct net_proto *pro))
ipv6_netdev_notif_init();
ipv6_packet_init();
ip6_route_init();
+ ip6_flowlabel_init();
addrconf_init();
sit_init();
@@ -592,6 +599,7 @@ void cleanup_module(void)
/* Cleanup code parts. */
sit_cleanup();
ipv6_netdev_notif_cleanup();
+ ip6_flowlabel_cleanup();
addrconf_cleanup();
ip6_route_cleanup();
ipv6_packet_cleanup();
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index cd8725ded..4fc785829 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: datagram.c,v 1.16 1998/10/03 09:38:25 davem Exp $
+ * $Id: datagram.c,v 1.17 1999/04/22 10:07:40 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -132,10 +132,13 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin = (struct sockaddr_in6 *)msg->msg_name;
if (sin) {
sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
sin->sin6_port = serr->port;
- if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6)
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
memcpy(&sin->sin6_addr, skb->nh.raw + serr->addr_offset, 16);
- else
+ if (sk->net_pinfo.af_inet6.sndflow)
+ sin->sin6_flowinfo = *(u32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK;
+ } else
ipv6_addr_set(&sin->sin6_addr, 0, 0,
__constant_htonl(0xffff),
*(u32*)(skb->nh.raw + serr->addr_offset));
@@ -146,6 +149,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_family = AF_UNSPEC;
if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) {
memcpy(&sin->sin6_addr, &skb->nh.ipv6h->saddr, 16);
if (sk->net_pinfo.af_inet6.rxopt.all)
@@ -199,6 +203,10 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
put_cmsg(msg, SOL_IPV6, IPV6_HOPLIMIT, sizeof(hlim), &hlim);
}
+ if (np->rxopt.bits.rxflow && (*(u32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) {
+ u32 flowinfo = *(u32*)skb->nh.raw & IPV6_FLOWINFO_MASK;
+ put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
+ }
if (np->rxopt.bits.hopopts && opt->hop) {
u8 *ptr = skb->nh.raw + opt->hop;
put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
@@ -222,8 +230,8 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
return 0;
}
-int datagram_send_ctl(struct msghdr *msg, int *oif,
- struct in6_addr **src_addr, struct ipv6_txoptions *opt,
+int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
+ struct ipv6_txoptions *opt,
int *hlimit)
{
struct in6_pktinfo *src_info;
@@ -235,17 +243,15 @@ int datagram_send_ctl(struct msghdr *msg, int *oif,
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
- if ((unsigned long)(((char*)cmsg - (char*)msg->msg_control)
+ if (cmsg->cmsg_len < sizeof(struct cmsghdr) ||
+ (unsigned long)(((char*)cmsg - (char*)msg->msg_control)
+ cmsg->cmsg_len) > msg->msg_controllen) {
err = -EINVAL;
goto exit_f;
}
- if (cmsg->cmsg_level != SOL_IPV6) {
- if (net_ratelimit())
- printk(KERN_DEBUG "invalid cmsg_level %d\n", cmsg->cmsg_level);
+ if (cmsg->cmsg_level != SOL_IPV6)
continue;
- }
switch (cmsg->cmsg_type) {
case IPV6_PKTINFO:
@@ -257,9 +263,9 @@ int datagram_send_ctl(struct msghdr *msg, int *oif,
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (src_info->ipi6_ifindex) {
- if (*oif && src_info->ipi6_ifindex != *oif)
+ if (fl->oif && src_info->ipi6_ifindex != fl->oif)
return -EINVAL;
- *oif = src_info->ipi6_ifindex;
+ fl->oif = src_info->ipi6_ifindex;
}
if (!ipv6_addr_any(&src_info->ipi6_addr)) {
@@ -272,11 +278,26 @@ int datagram_send_ctl(struct msghdr *msg, int *oif,
goto exit_f;
}
- *src_addr = &src_info->ipi6_addr;
+ fl->fl6_src = &src_info->ipi6_addr;
}
break;
+ case IPV6_FLOWINFO:
+ if (cmsg->cmsg_len < CMSG_LEN(4)) {
+ err = -EINVAL;
+ goto exit_f;
+ }
+
+ if (fl->fl6_flowlabel&IPV6_FLOWINFO_MASK) {
+ if ((fl->fl6_flowlabel^*(u32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) {
+ err = -EINVAL;
+ goto exit_f;
+ }
+ }
+ fl->fl6_flowlabel = IPV6_FLOWINFO_MASK & *(u32 *)CMSG_DATA(cmsg);
+ break;
+
case IPV6_HOPOPTS:
if (opt->hopopt || cmsg->cmsg_len < CMSG_LEN(sizeof(struct ipv6_opt_hdr))) {
err = -EINVAL;
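
The new IPV6_FLOWINFO case in datagram_send_ctl() takes a 32-bit ancillary object, checks it against any flow label already chosen for the socket, and folds its IPV6_FLOWINFO_MASK bits into fl->fl6_flowlabel. A hedged user-space sketch of supplying that control message with sendmsg() follows; it assumes IPV6_FLOWINFO is visible in the installed headers and that the word is given in network byte order, since the kernel masks it directly against the on-wire header.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Hedged sketch: send one datagram with an explicit flowinfo word
 * (traffic class + flow label bits, network byte order).  IPV6_FLOWINFO
 * is assumed to be provided by the installed kernel headers. */
static ssize_t send_with_flowinfo(int fd, struct sockaddr_in6 *to,
				  void *buf, size_t len, uint32_t flowinfo)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { buf, len };
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = to;
	msg.msg_namelen = sizeof(*to);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = SOL_IPV6;
	cm->cmsg_type = IPV6_FLOWINFO;
	cm->cmsg_len = CMSG_LEN(sizeof(uint32_t));
	memcpy(CMSG_DATA(cm), &flowinfo, sizeof(flowinfo));

	return sendmsg(fd, &msg, 0);
}
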
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 8f49443e6..3760be8eb 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: icmp.c,v 1.20 1998/10/03 09:38:31 davem Exp $
+ * $Id: icmp.c,v 1.21 1999/03/21 05:22:51 davem Exp $
*
* Based on net/ipv4/icmp.c
*
@@ -200,9 +200,11 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
* this lookup should be more aggressive (not longer than timeout).
*/
dst = ip6_route_output(sk, fl);
- if (dst->error)
+ if (dst->error) {
ipv6_statistics.Ip6OutNoRoutes++;
- else {
+ } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
+ res = 1;
+ } else {
struct rt6_info *rt = (struct rt6_info *)dst;
int tmo = sysctl_icmpv6_time;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bad3a13ec..d20925c95 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_fib.c,v 1.15 1998/08/26 12:04:55 davem Exp $
+ * $Id: ip6_fib.c,v 1.17 1999/04/22 10:07:41 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -103,8 +103,8 @@ static struct fib6_walker_t fib6_walker_list = {
static __inline__ u32 fib6_new_sernum(void)
{
u32 n = ++rt_sernum;
- if (n == 0)
- n = ++rt_sernum;
+ if ((__s32)n <= 0)
+ rt_sernum = n = 1;
return n;
}
@@ -890,9 +890,6 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp)
RT6_TRACE("fib6_del_route\n");
- if (!(rt->rt6i_flags&RTF_CACHE))
- fib6_prune_clones(fn, rt);
-
/* Unlink it */
*rtp = rt->u.next;
rt->rt6i_node = NULL;
@@ -939,6 +936,9 @@ int fib6_del(struct rt6_info *rt)
BUG_TRAP(fn->fn_flags&RTN_RTINFO);
+ if (!(rt->rt6i_flags&RTF_CACHE))
+ fib6_prune_clones(fn, rt);
+
/*
* Walk the leaf entries looking for ourself
*/
@@ -1157,7 +1157,6 @@ static int fib6_age(struct rt6_info *rt, void *arg)
return -1;
}
gc_args.more++;
- return 0;
}
/*
@@ -1171,7 +1170,6 @@ static int fib6_age(struct rt6_info *rt, void *arg)
return -1;
}
gc_args.more++;
- return 0;
}
return 0;
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
new file mode 100644
index 000000000..9aa60db40
--- /dev/null
+++ b/net/ipv6/ip6_flowlabel.c
@@ -0,0 +1,620 @@
+/*
+ * ip6_flowlabel.c IPv6 flowlabel manager.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/route.h>
+#include <linux/proc_fs.h>
+
+#include <net/sock.h>
+
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+#include <net/protocol.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+#include <net/rawv6.h>
+#include <net/icmp.h>
+#include <net/transp_v6.h>
+
+#include <asm/uaccess.h>
+
+#define FL_MIN_LINGER 6 /* Minimal linger. It is set to the 6 seconds
+ specified in the old IPv6 RFC; it was a reasonable value.
+ */
+#define FL_MAX_LINGER 60 /* Maximal linger timeout */
+
+/* FL hash table */
+
+#define FL_MAX_PER_SOCK 32
+#define FL_MAX_SIZE 4096
+#define FL_HASH_MASK 255
+#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK)
+
+static atomic_t fl_size = ATOMIC_INIT(0);
+static struct ip6_flowlabel *fl_ht[FL_HASH_MASK+1];
+
+static struct timer_list ip6_fl_gc_timer;
+
+/* FL hash table lock: it protects only against the GC */
+
+static atomic_t ip6_fl_lock = ATOMIC_INIT(0);
+
+static __inline__ void fl_lock(void)
+{
+ atomic_inc(&ip6_fl_lock);
+ synchronize_bh();
+}
+
+static __inline__ void fl_unlock(void)
+{
+ atomic_dec(&ip6_fl_lock);
+}
+
+static struct ip6_flowlabel * fl_lookup(u32 label)
+{
+ struct ip6_flowlabel *fl;
+
+ fl_lock();
+ for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
+ if (fl->label == label) {
+ atomic_inc(&fl->users);
+ break;
+ }
+ }
+ fl_unlock();
+ return fl;
+}
+
+static void fl_free(struct ip6_flowlabel *fl)
+{
+ if (fl->opt)
+ kfree(fl->opt);
+ kfree(fl);
+}
+
+static void fl_release(struct ip6_flowlabel *fl)
+{
+ fl_lock();
+ fl->lastuse = jiffies;
+ if (atomic_dec_and_test(&fl->users)) {
+ unsigned long ttd = fl->lastuse + fl->linger;
+ if ((long)(ttd - fl->expires) > 0)
+ fl->expires = ttd;
+ ttd = fl->expires;
+ if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
+ struct ipv6_txoptions *opt = fl->opt;
+ fl->opt = NULL;
+ kfree(opt);
+ }
+ if (!del_timer(&ip6_fl_gc_timer) ||
+ (long)(ip6_fl_gc_timer.expires - ttd) > 0)
+ ip6_fl_gc_timer.expires = ttd;
+ add_timer(&ip6_fl_gc_timer);
+ }
+ fl_unlock();
+}
+
+static void ip6_fl_gc(unsigned long dummy)
+{
+ int i;
+ unsigned long now = jiffies;
+ unsigned long sched = 0;
+
+ if (atomic_read(&ip6_fl_lock)) {
+ ip6_fl_gc_timer.expires = now + HZ/10;
+ add_timer(&ip6_fl_gc_timer);
+ return;
+ }
+
+ for (i=0; i<=FL_HASH_MASK; i++) {
+ struct ip6_flowlabel *fl, **flp;
+ flp = &fl_ht[i];
+ while ((fl=*flp) != NULL) {
+ if (atomic_read(&fl->users) == 0) {
+ unsigned long ttd = fl->lastuse + fl->linger;
+ if ((long)(ttd - fl->expires) > 0)
+ fl->expires = ttd;
+ ttd = fl->expires;
+ if ((long)(now - ttd) >= 0) {
+ *flp = fl->next;
+ fl_free(fl);
+ atomic_dec(&fl_size);
+ continue;
+ }
+ if (!sched || (long)(ttd - sched) < 0)
+ sched = ttd;
+ }
+ flp = &fl->next;
+ }
+ }
+ if (!sched && atomic_read(&fl_size))
+ sched = now + FL_MAX_LINGER;
+ if (sched) {
+ ip6_fl_gc_timer.expires = sched;
+ add_timer(&ip6_fl_gc_timer);
+ }
+}
+
+static int fl_intern(struct ip6_flowlabel *fl, __u32 label)
+{
+ fl->label = label & IPV6_FLOWLABEL_MASK;
+
+ fl_lock();
+ if (label == 0) {
+ for (;;) {
+ fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
+ if (fl->label) {
+ struct ip6_flowlabel *lfl;
+ lfl = fl_lookup(fl->label);
+ if (lfl == NULL)
+ break;
+ fl_release(lfl);
+ }
+ }
+ }
+
+ fl->lastuse = jiffies;
+ fl->next = fl_ht[FL_HASH(fl->label)];
+ fl_ht[FL_HASH(fl->label)] = fl;
+ atomic_inc(&fl_size);
+ fl_unlock();
+ return 0;
+}
+
+
+
+/* Socket flowlabel lists */
+
+struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, u32 label)
+{
+ struct ipv6_fl_socklist *sfl;
+ struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
+
+ label &= IPV6_FLOWLABEL_MASK;
+
+ for (sfl=np->ipv6_fl_list; sfl; sfl = sfl->next) {
+ struct ip6_flowlabel *fl = sfl->fl;
+ if (fl->label == label) {
+ fl->lastuse = jiffies;
+ atomic_inc(&fl->users);
+ return fl;
+ }
+ }
+ return NULL;
+}
+
+void fl6_free_socklist(struct sock *sk)
+{
+ struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
+ struct ipv6_fl_socklist *sfl;
+
+ while ((sfl = np->ipv6_fl_list) != NULL) {
+ np->ipv6_fl_list = sfl->next;
+ fl_release(sfl->fl);
+ kfree(sfl);
+ }
+}
+
+/* Service routines */
+
+
+/*
+ This is the only difficult place: a flow label enforces equal headers
+ up to and including the routing header, but the user may still supply
+ options following the rthdr.
+ */
+
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
+ struct ip6_flowlabel * fl,
+ struct ipv6_txoptions * fopt)
+{
+ struct ipv6_txoptions * fl_opt = fl->opt;
+
+ if (fopt == NULL || fopt->opt_flen == 0)
+ return fl_opt;
+
+ if (fl_opt != NULL) {
+ opt_space->hopopt = fl_opt->hopopt;
+ opt_space->dst0opt = fl_opt->dst0opt;
+ opt_space->srcrt = fl_opt->srcrt;
+ opt_space->opt_nflen = fl_opt->opt_nflen;
+ } else {
+ if (fopt->opt_nflen == 0)
+ return fopt;
+ opt_space->hopopt = NULL;
+ opt_space->dst0opt = NULL;
+ opt_space->srcrt = NULL;
+ opt_space->opt_nflen = 0;
+ }
+ opt_space->dst1opt = fopt->dst1opt;
+ opt_space->auth = fopt->auth;
+ opt_space->opt_flen = fopt->opt_flen;
+ return opt_space;
+}
+
+static __u32 check_linger(__u16 ttl)
+{
+ if (ttl < FL_MIN_LINGER)
+ return FL_MIN_LINGER*HZ;
+ if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
+ return 0;
+ return ttl*HZ;
+}
+
+static int fl6_renew(struct ip6_flowlabel *fl, unsigned linger, unsigned expires)
+{
+ linger = check_linger(linger);
+ if (!linger)
+ return -EPERM;
+ expires = check_linger(expires);
+ if (!expires)
+ return -EPERM;
+ fl->lastuse = jiffies;
+ if (fl->linger < linger)
+ fl->linger = linger;
+ if (expires < fl->linger)
+ expires = fl->linger;
+ if ((long)(fl->expires - (fl->lastuse+expires)) < 0)
+ fl->expires = fl->lastuse + expires;
+ return 0;
+}
+
+static struct ip6_flowlabel *
+fl_create(struct in6_flowlabel_req *freq, char *optval, int optlen, int *err_p)
+{
+ struct ip6_flowlabel *fl;
+ int olen;
+ int addr_type;
+ int err;
+
+ err = -ENOMEM;
+ fl = kmalloc(sizeof(*fl), GFP_KERNEL);
+ if (fl == NULL)
+ goto done;
+ memset(fl, 0, sizeof(*fl));
+
+ olen = optlen - CMSG_ALIGN(sizeof(*freq));
+ if (olen > 0) {
+ struct msghdr msg;
+ struct flowi flowi;
+ int junk;
+
+ err = -ENOMEM;
+ fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
+ if (fl->opt == NULL)
+ goto done;
+
+ memset(fl->opt, 0, sizeof(*fl->opt));
+ fl->opt->tot_len = sizeof(*fl->opt) + olen;
+ err = -EFAULT;
+ if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
+ goto done;
+
+ msg.msg_controllen = olen;
+ msg.msg_control = (void*)(fl->opt+1);
+ flowi.oif = 0;
+
+ err = datagram_send_ctl(&msg, &flowi, fl->opt, &junk);
+ if (err)
+ goto done;
+ err = -EINVAL;
+ if (fl->opt->opt_flen)
+ goto done;
+ if (fl->opt->opt_nflen == 0) {
+ kfree(fl->opt);
+ fl->opt = NULL;
+ }
+ }
+
+ fl->expires = jiffies;
+ err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
+ if (err)
+ goto done;
+ fl->share = freq->flr_share;
+ addr_type = ipv6_addr_type(&freq->flr_dst);
+ if ((addr_type&IPV6_ADDR_MAPPED)
+ || addr_type == IPV6_ADDR_ANY)
+ goto done;
+ ipv6_addr_copy(&fl->dst, &freq->flr_dst);
+ atomic_set(&fl->users, 1);
+ switch (fl->share) {
+ case IPV6_FL_S_EXCL:
+ case IPV6_FL_S_ANY:
+ break;
+ case IPV6_FL_S_PROCESS:
+ fl->owner = current->pid;
+ break;
+ case IPV6_FL_S_USER:
+ fl->owner = current->euid;
+ break;
+ default:
+ err = -EINVAL;
+ goto done;
+ }
+ return fl;
+
+done:
+ if (fl)
+ fl_free(fl);
+ *err_p = err;
+ return NULL;
+}
+
+static int mem_check(struct sock *sk)
+{
+ struct ipv6_fl_socklist *sfl;
+ int room = FL_MAX_SIZE - atomic_read(&fl_size);
+ int count = 0;
+
+ if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
+ return 0;
+
+ for (sfl = sk->net_pinfo.af_inet6.ipv6_fl_list; sfl; sfl = sfl->next)
+ count++;
+
+ if (room <= 0 ||
+ ((count >= FL_MAX_PER_SOCK ||
+ (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4)
+ && !capable(CAP_NET_ADMIN)))
+ return -ENOBUFS;
+
+ return 0;
+}
+
+static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
+{
+ if (h1 == h2)
+ return 0;
+ if (h1 == NULL || h2 == NULL)
+ return 1;
+ if (h1->hdrlen != h2->hdrlen)
+ return 1;
+ return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
+}
+
+static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
+{
+ if (o1 == o2)
+ return 0;
+ if (o1 == NULL || o2 == NULL)
+ return 1;
+ if (o1->opt_nflen != o2->opt_nflen)
+ return 1;
+ if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
+ return 1;
+ if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
+ return 1;
+ if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
+ return 1;
+ return 0;
+}
+
+int ipv6_flowlabel_opt(struct sock *sk, char *optval, int optlen)
+{
+ int err;
+ struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
+ struct in6_flowlabel_req freq;
+ struct ipv6_fl_socklist *sfl1=NULL;
+ struct ipv6_fl_socklist *sfl, **sflp;
+ struct ip6_flowlabel *fl;
+
+ if (optlen < sizeof(freq))
+ return -EINVAL;
+
+ if (copy_from_user(&freq, optval, sizeof(freq)))
+ return -EFAULT;
+
+ switch (freq.flr_action) {
+ case IPV6_FL_A_PUT:
+ for (sflp = &np->ipv6_fl_list; (sfl=*sflp)!=NULL; sflp = &sfl->next) {
+ if (sfl->fl->label == freq.flr_label) {
+ if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
+ np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+ *sflp = sfl->next;
+ synchronize_bh();
+ fl_release(sfl->fl);
+ kfree(sfl);
+ return 0;
+ }
+ }
+ return -ESRCH;
+
+ case IPV6_FL_A_RENEW:
+ for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
+ if (sfl->fl->label == freq.flr_label)
+ return fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
+ }
+ if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) {
+ fl = fl_lookup(freq.flr_label);
+ if (fl) {
+ err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
+ fl_release(fl);
+ return err;
+ }
+ }
+ return -ESRCH;
+
+ case IPV6_FL_A_GET:
+ if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
+ return -EINVAL;
+
+ fl = fl_create(&freq, optval, optlen, &err);
+ if (fl == NULL)
+ return err;
+ sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+
+ if (freq.flr_label) {
+ struct ip6_flowlabel *fl1 = NULL;
+
+ err = -EEXIST;
+ for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
+ if (sfl->fl->label == freq.flr_label) {
+ if (freq.flr_flags&IPV6_FL_F_EXCL)
+ goto done;
+ fl1 = sfl->fl;
+ atomic_inc(&fl1->users);
+ break;
+ }
+ }
+
+ if (fl1 == NULL)
+ fl1 = fl_lookup(freq.flr_label);
+ if (fl1) {
+ err = -EEXIST;
+ if (freq.flr_flags&IPV6_FL_F_EXCL)
+ goto release;
+ err = -EPERM;
+ if (fl1->share == IPV6_FL_S_EXCL ||
+ fl1->share != fl->share ||
+ fl1->owner != fl->owner)
+ goto release;
+
+ err = -EINVAL;
+ if (ipv6_addr_cmp(&fl1->dst, &fl->dst) ||
+ ipv6_opt_cmp(fl1->opt, fl->opt))
+ goto release;
+
+ err = -ENOMEM;
+ if (sfl1 == NULL)
+ goto release;
+ if (fl->linger > fl1->linger)
+ fl1->linger = fl->linger;
+ if ((long)(fl->expires - fl1->expires) > 0)
+ fl1->expires = fl->expires;
+ sfl1->fl = fl1;
+ sfl1->next = np->ipv6_fl_list;
+ np->ipv6_fl_list = sfl1;
+ synchronize_bh();
+ fl_free(fl);
+ return 0;
+
+release:
+ fl_release(fl1);
+ goto done;
+ }
+ }
+ err = -ENOENT;
+ if (!(freq.flr_flags&IPV6_FL_F_CREATE))
+ goto done;
+
+ err = -ENOMEM;
+ if (sfl1 == NULL || (err = mem_check(sk)) != 0)
+ goto done;
+
+ err = fl_intern(fl, freq.flr_label);
+ if (err)
+ goto done;
+
+ /* Do not check for fault */
+ if (!freq.flr_label)
+ copy_to_user(optval + ((u8*)&freq.flr_label - (u8*)&freq), &fl->label, sizeof(fl->label));
+
+ sfl1->fl = fl;
+ sfl1->next = np->ipv6_fl_list;
+ np->ipv6_fl_list = sfl1;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+
+done:
+ if (fl)
+ fl_free(fl);
+ if (sfl1)
+ kfree(sfl1);
+ return err;
+}
+
+#ifdef CONFIG_PROC_FS
+
+
+static int ip6_fl_read_proc(char *buffer, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ off_t pos=0;
+ off_t begin=0;
+ int len=0;
+ int i, k;
+ struct ip6_flowlabel *fl;
+
+ len+= sprintf(buffer,"Label S Owner Users Linger Expires "
+ "Dst Opt\n");
+
+ fl_lock();
+ for (i=0; i<=FL_HASH_MASK; i++) {
+ for (fl = fl_ht[i]; fl; fl = fl->next) {
+ len+=sprintf(buffer+len,"%05X %-1d %-6d %-6d %-6d %-8ld ",
+ (unsigned)ntohl(fl->label),
+ fl->share,
+ (unsigned)fl->owner,
+ atomic_read(&fl->users),
+ fl->linger/HZ,
+ (long)(fl->expires - jiffies)/HZ);
+
+ for (k=0; k<16; k++)
+ len+=sprintf(buffer+len, "%02x", fl->dst.s6_addr[k]);
+ buffer[len++]=' ';
+ len+=sprintf(buffer+len, "%-4d", fl->opt ? fl->opt->opt_nflen : 0);
+ buffer[len++]='\n';
+
+ pos=begin+len;
+ if(pos<offset) {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ goto done;
+ }
+ }
+ *eof = 1;
+
+done:
+ fl_unlock();
+ *start=buffer+(offset-begin);
+ len-=(offset-begin);
+ if(len>length)
+ len=length;
+ if(len<0)
+ len=0;
+ return len;
+}
+#endif
+
+
+void ip6_flowlabel_init()
+{
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *ent;
+#endif
+
+ init_timer(&ip6_fl_gc_timer);
+ ip6_fl_gc_timer.function = ip6_fl_gc;
+#ifdef CONFIG_PROC_FS
+ ent = create_proc_entry("net/ip6_flowlabel", 0, 0);
+ ent->read_proc = ip6_fl_read_proc;
+#endif
+}
+
+void ip6_flowlabel_cleanup()
+{
+ del_timer(&ip6_fl_gc_timer);
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("net/ip6_flowlabel", 0);
+#endif
+}
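
For reference, the other end of the new manager: ipv6_flowlabel_opt() above is driven through the IPV6_FLOWLABEL_MGR socket option, and a label obtained this way is actually emitted once IPV6_FLOWINFO_SEND is enabled and the label is passed in sin6_flowinfo (or via the IPV6_FLOWINFO control message). Below is a hedged user-space sketch of reserving a label; struct in6_flowlabel_req and the IPV6_FL_* constants are taken on trust from the accompanying kernel headers (not shown in this diff), and on some libcs <linux/in6.h> clashes with <netinet/in.h>, so treat the includes as illustrative.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/in6.h>	/* in6_flowlabel_req, IPV6_FL_*, IPV6_FLOWLABEL_MGR (assumed) */

/* Hedged sketch: ask the kernel to reserve a flow label towards dst and
 * arrange for it to be sent.  Only fields that ipv6_flowlabel_opt()
 * above actually reads are filled in. */
static int get_flowlabel(int fd, const struct in6_addr *dst, uint32_t *label)
{
	struct in6_flowlabel_req freq;
	int on = 1;

	memset(&freq, 0, sizeof(freq));
	freq.flr_action = IPV6_FL_A_GET;
	freq.flr_flags  = IPV6_FL_F_CREATE;	/* create it if it does not exist */
	freq.flr_share  = IPV6_FL_S_EXCL;	/* exclusive to this owner        */
	freq.flr_label  = 0;			/* 0 = let fl_intern() pick one   */
	freq.flr_dst    = *dst;

	if (setsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq)) < 0)
		return -1;
	*label = freq.flr_label;	/* written back by the kernel when it chose */

	/* Tell the stack to put the label on the wire; sin6_flowinfo or an
	 * IPV6_FLOWINFO cmsg supplies the value per connect()/sendmsg(). */
	return setsockopt(fd, SOL_IPV6, IPV6_FLOWINFO_SEND, &on, sizeof(on));
}
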
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a9dfa97ba..26ec51c4d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_output.c,v 1.15 1998/10/03 09:38:34 davem Exp $
+ * $Id: ip6_output.c,v 1.17 1999/04/22 10:07:42 davem Exp $
*
* Based on linux/net/ipv4/ip_output.c
*
@@ -77,11 +77,14 @@ int ip6_output(struct sk_buff *skb)
/* Alpha has disguisting memcpy. Help it. */
u64 *aligned_hdr = (u64*)(skb->data - 16);
u64 *aligned_hdr0 = hh->hh_data;
+ read_lock_irq(&hh->hh_lock);
aligned_hdr[0] = aligned_hdr0[0];
aligned_hdr[1] = aligned_hdr0[1];
#else
+ read_lock_irq(&hh->hh_lock);
memcpy(skb->data - 16, hh->hh_data, 16);
#endif
+ read_unlock_irq(&hh->hh_lock);
skb_push(skb, dev->hard_header_len);
return hh->hh_output(skb);
} else if (dst->neighbour)
@@ -137,16 +140,10 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
* Fill in the IPv6 header
*/
- hdr->version = 6;
- if (np) {
- hdr->priority = np->priority;
- memcpy(hdr->flow_lbl, (void *) &np->flow_lbl, 3);
+ *(u32*)hdr = __constant_htonl(0x60000000) | fl->fl6_flowlabel;
+ hlimit = -1;
+ if (np)
hlimit = np->hop_limit;
- } else {
- hdr->priority = 0;
- memset(hdr->flow_lbl, 0, 3);
- hlimit = -1;
- }
if (hlimit < 0)
hlimit = ((struct rt6_info*)dst)->rt6i_hoplimit;
@@ -164,7 +161,9 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
}
printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
+ start_bh_atomic();
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
+ end_bh_atomic();
kfree_skb(skb);
return -EMSGSIZE;
}
@@ -192,9 +191,7 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct device *dev,
hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
skb->nh.ipv6h = hdr;
- hdr->version = 6;
- hdr->priority = np->priority & 0x0f;
- memset(hdr->flow_lbl, 0, 3);
+ *(u32*)hdr = htonl(0x60000000);
hdr->payload_len = htons(len);
hdr->nexthdr = proto;
@@ -209,16 +206,13 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct device *dev,
static struct ipv6hdr * ip6_bld_1(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
int hlimit, unsigned pktlength)
{
- struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct ipv6hdr *hdr;
skb->nh.raw = skb_put(skb, sizeof(struct ipv6hdr));
hdr = skb->nh.ipv6h;
- hdr->version = 6;
- hdr->priority = np->priority;
- memcpy(hdr->flow_lbl, &np->flow_lbl, 3);
-
+ *(u32*)hdr = fl->fl6_flowlabel | htonl(0x60000000);
+
hdr->payload_len = htons(pktlength - sizeof(struct ipv6hdr));
hdr->hop_limit = hlimit;
hdr->nexthdr = fl->proto;
@@ -427,11 +421,12 @@ int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
struct dst_entry *dst;
int err = 0;
unsigned int pktlength, jumbolen, mtu;
+ struct in6_addr saddr;
if (opt && opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
- final_dst = fl->nl_u.ip6_u.daddr;
- fl->nl_u.ip6_u.daddr = rt0->addr;
+ final_dst = fl->fl6_dst;
+ fl->fl6_dst = rt0->addr;
}
if (!fl->oif && ipv6_addr_is_multicast(fl->nl_u.ip6_u.daddr))
@@ -480,25 +475,22 @@ int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
return -ENETUNREACH;
}
- if (fl->nl_u.ip6_u.saddr == NULL) {
- struct inet6_ifaddr *ifa;
-
- ifa = ipv6_get_saddr(dst, fl->nl_u.ip6_u.daddr);
+ if (fl->fl6_src == NULL) {
+ err = ipv6_get_saddr(dst, fl->fl6_dst, &saddr);
- if (ifa == NULL) {
+ if (err) {
#if IP6_DEBUG >= 2
printk(KERN_DEBUG "ip6_build_xmit: "
"no availiable source address\n");
#endif
- err = -ENETUNREACH;
goto out;
}
- fl->nl_u.ip6_u.saddr = &ifa->addr;
+ fl->fl6_src = &saddr;
}
pktlength = length;
if (hlimit < 0) {
- if (ipv6_addr_is_multicast(fl->nl_u.ip6_u.daddr))
+ if (ipv6_addr_is_multicast(fl->fl6_dst))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4b8089d4a..20de5bb2e 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -7,7 +7,7 @@
*
* Based on linux/net/ipv4/ip_sockglue.c
*
- * $Id: ipv6_sockglue.c,v 1.24 1998/10/03 09:38:37 davem Exp $
+ * $Id: ipv6_sockglue.c,v 1.27 1999/04/22 10:07:43 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -86,7 +86,10 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
kfree(new_ra);
return -EADDRINUSE;
}
+
*rap = ra->next;
+ synchronize_bh();
+
if (ra->destructor)
ra->destructor(sk);
kfree(ra);
@@ -136,17 +139,21 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
if (sk->protocol != IPPROTO_UDP &&
sk->protocol != IPPROTO_TCP)
goto out;
-
+
+ lock_sock(sk);
if (sk->state != TCP_ESTABLISHED) {
retv = ENOTCONN;
- goto out;
+ goto addrform_done;
}
if (!(ipv6_addr_type(&np->daddr) & IPV6_ADDR_MAPPED)) {
retv = -EADDRNOTAVAIL;
- goto out;
+ goto addrform_done;
}
+ fl6_free_socklist(sk);
+ ipv6_sock_mc_close(sk);
+
if (sk->protocol == IPPROTO_TCP) {
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
@@ -166,6 +173,9 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
if (pktopt)
kfree_skb(pktopt);
retv = 0;
+
+addrform_done:
+ release_sock(sk);
} else {
retv = -EINVAL;
}
@@ -204,12 +214,19 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
retv = 0;
break;
+ case IPV6_FLOWINFO:
+ np->rxopt.bits.rxflow = valbool;
+ return 0;
+
case IPV6_PKTOPTIONS:
{
struct ipv6_txoptions *opt = NULL;
struct msghdr msg;
+ struct flowi fl;
int junk;
- struct in6_addr *saddr;
+
+ fl.fl6_flowlabel = 0;
+ fl.oif = sk->bound_dev_if;
if (optlen == 0)
goto update;
@@ -228,7 +245,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
msg.msg_controllen = optlen;
msg.msg_control = (void*)(opt+1);
- retv = datagram_send_ctl(&msg, &junk, &saddr, opt, &junk);
+ retv = datagram_send_ctl(&msg, &fl, opt, &junk);
if (retv)
goto done;
update:
@@ -314,10 +331,15 @@ done:
np->frag_size = val;
return 0;
case IPV6_RECVERR:
- np->recverr = !!val;
+ np->recverr = valbool;
if (!val)
skb_queue_purge(&sk->error_queue);
return 0;
+ case IPV6_FLOWINFO_SEND:
+ np->sndflow = valbool;
+ return 0;
+ case IPV6_FLOWLABEL_MGR:
+ return ipv6_flowlabel_opt(sk, optval, optlen);
};
out:
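
On the receive side the same option name doubles as a boolean: setsockopt(IPV6_FLOWINFO) above merely sets np->rxopt.bits.rxflow, after which datagram_recv_ctl() (patched earlier) delivers the incoming packet's flowinfo word as a SOL_IPV6/IPV6_FLOWINFO control message, and only when the packet carried nonzero flowinfo bits. A hedged sketch of reading it back; again IPV6_FLOWINFO is assumed to come from the installed headers, and the value is in network byte order.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Hedged sketch: enable flowinfo reporting, then pick the value out of
 * the ancillary data returned by recvmsg(). */
static ssize_t recv_with_flowinfo(int fd, void *buf, size_t len, uint32_t *flowinfo)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { buf, len };
	struct msghdr msg;
	struct cmsghdr *cm;
	ssize_t n;
	int on = 1;

	/* Normally done once at socket setup: request the cmsg. */
	setsockopt(fd, SOL_IPV6, IPV6_FLOWINFO, &on, sizeof(on));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return n;

	*flowinfo = 0;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_FLOWINFO)
			memcpy(flowinfo, CMSG_DATA(cm), sizeof(*flowinfo));
	}
	return n;
}
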
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 88950481e..939d268da 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: mcast.c,v 1.17 1998/08/26 12:05:06 davem Exp $
+ * $Id: mcast.c,v 1.19 1999/03/25 10:04:50 davem Exp $
*
* Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
*
@@ -132,7 +132,10 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
if (mc_lst->ifindex == ifindex &&
ipv6_addr_cmp(&mc_lst->addr, addr) == 0) {
struct device *dev;
+
*lnk = mc_lst->next;
+ synchronize_bh();
+
if ((dev = dev_get_by_index(ifindex)) != NULL)
ipv6_dev_mc_dec(dev, &mc_lst->addr);
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -253,6 +256,7 @@ static void ipv6_mca_remove(struct device *dev, struct ifmcaddr6 *ma)
for (lnk = &idev->mc_list; (iter = *lnk) != NULL; lnk = &iter->if_next) {
if (iter == ma) {
*lnk = iter->if_next;
+ synchronize_bh();
return;
}
}
@@ -273,7 +277,10 @@ int ipv6_dev_mc_dec(struct device *dev, struct in6_addr *addr)
if (ipv6_addr_cmp(&ma->mca_addr, addr) == 0 && ma->dev == dev) {
if (atomic_dec_and_test(&ma->mca_users)) {
igmp6_group_dropped(ma);
+
*lnk = ma->next;
+ synchronize_bh();
+
ipv6_mca_remove(dev, ma);
kfree(ma);
}
@@ -496,10 +503,10 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
if ((addr_type & (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK)))
return;
+ start_bh_atomic();
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REPORT);
delay = net_random() % IGMP6_UNSOLICITED_IVAL;
- start_bh_atomic();
if (del_timer(&ma->mca_timer))
delay = ma->mca_timer.expires - jiffies;
@@ -519,11 +526,13 @@ static void igmp6_leave_group(struct ifmcaddr6 *ma)
if ((addr_type & IPV6_ADDR_LINKLOCAL))
return;
+ start_bh_atomic();
if (ma->mca_flags & MAF_LAST_REPORTER)
igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REDUCTION);
if (ma->mca_flags & MAF_TIMER_RUNNING)
del_timer(&ma->mca_timer);
+ end_bh_atomic();
}
void igmp6_timer_handler(unsigned long data)
@@ -577,10 +586,21 @@ void ipv6_mc_up(struct inet6_dev *idev)
void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
- struct ifmcaddr6 *i;
+ int hash;
+ struct ifmcaddr6 *i, **lnk;
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->if_next;
+
+ hash = ipv6_addr_hash(&i->mca_addr);
+
+ for (lnk = &inet6_mcast_lst[hash]; *lnk; lnk = &(*lnk)->next) {
+ if (*lnk == i) {
+ *lnk = i->next;
+ synchronize_bh();
+ break;
+ }
+ }
igmp6_group_dropped(i);
kfree(i);
}
@@ -631,6 +651,8 @@ done:
len-=(offset-begin);
if(len>length)
len=length;
+ if (len<0)
+ len=0;
return len;
}
#endif
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c21e48d80..bb5e08373 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -335,7 +335,7 @@ void ndisc_send_na(struct device *dev, struct neighbour *neigh,
msg->icmph.icmp6_unused = 0;
msg->icmph.icmp6_router = router;
msg->icmph.icmp6_solicited = solicited;
- msg->icmph.icmp6_override = override;
+ msg->icmph.icmp6_override = !!override;
/* Set the target address. */
ipv6_addr_copy(&msg->target, solicited_addr);
@@ -497,7 +497,7 @@ static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
* "The sender MUST return an ICMP
* destination unreachable"
*/
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
+ dst_link_failure(skb);
kfree_skb(skb);
}
@@ -604,6 +604,13 @@ static void ndisc_router_discovery(struct sk_buff *skb)
return;
}
neigh->flags |= NTF_ROUTER;
+
+ /*
+ * If we were using an "all destinations on link" route
+ * delete it
+ */
+
+ rt6_purge_dflt_routers(RTF_ALLONLINK);
}
if (rt)
@@ -806,7 +813,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
}
}
- rd_len = min(IPV6_MIN_MTU-sizeof(struct ipv6hdr)-len, ntohs(skb->nh.ipv6h->payload_len) + 8);
+ rd_len = min(IPV6_MIN_MTU-sizeof(struct ipv6hdr)-len, skb->len + 8);
rd_len &= ~0x7;
len += rd_len;
@@ -866,7 +873,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
*(opt++) = (rd_len >> 3);
opt += 6;
- memcpy(opt, &skb->nh.ipv6h, rd_len - 8);
+ memcpy(opt, skb->nh.ipv6h, rd_len - 8);
icmph->icmp6_cksum = csum_ipv6_magic(&ifp->addr, &skb->nh.ipv6h->saddr,
len, IPPROTO_ICMPV6,
@@ -989,7 +996,7 @@ int ndisc_rcv(struct sk_buff *skb, unsigned long len)
if (neigh) {
ndisc_send_na(dev, neigh, saddr, &msg->target,
- 0, 0, inc, inc);
+ 0, 1, 0, inc);
neigh_release(neigh);
}
} else {
@@ -1173,7 +1180,6 @@ __initfunc(int ndisc_init(struct net_proto_family *ops))
sk = ndisc_socket->sk;
sk->allocation = GFP_ATOMIC;
sk->net_pinfo.af_inet6.hop_limit = 255;
- sk->net_pinfo.af_inet6.priority = 15;
/* Do not loopback ndisc messages */
sk->net_pinfo.af_inet6.mc_loop = 0;
sk->num = 256;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 3b02e06d9..f82ac33db 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -7,7 +7,7 @@
*
* Adapted from linux/net/ipv4/raw.c
*
- * $Id: raw.c,v 1.23 1998/11/08 11:17:09 davem Exp $
+ * $Id: raw.c,v 1.24 1999/04/22 10:07:45 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -283,6 +283,7 @@ int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
sizeof(struct in6_addr));
+ sin6->sin6_flowinfo = 0;
}
if (sk->net_pinfo.af_inet6.rxopt.all)
@@ -363,7 +364,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct ipv6_txoptions *opt = NULL;
- struct in6_addr *saddr = NULL;
+ struct ip6_flowlabel *flowlabel = NULL;
struct flowi fl;
int addr_len = msg->msg_namelen;
struct in6_addr *daddr;
@@ -388,6 +389,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
* Get and verify the address.
*/
+ fl.fl6_flowlabel = 0;
+
if (sin6) {
if (addr_len < sizeof(struct sockaddr_in6))
return(-EINVAL);
@@ -405,12 +408,28 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
return(-EINVAL);
daddr = &sin6->sin6_addr;
+ if (np->sndflow) {
+ fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+ if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ daddr = &flowlabel->dst;
+ }
+ }
+
+
+ /* Otherwise it will be difficult to maintain sk->dst_cache. */
+ if (sk->state == TCP_ESTABLISHED &&
+ !ipv6_addr_cmp(daddr, &sk->net_pinfo.af_inet6.daddr))
+ daddr = &sk->net_pinfo.af_inet6.daddr;
} else {
if (sk->state != TCP_ESTABLISHED)
return(-EINVAL);
proto = sk->num;
daddr = &(sk->net_pinfo.af_inet6.daddr);
+ fl.fl6_flowlabel = np->flow_label;
}
if (ipv6_addr_any(daddr)) {
@@ -422,23 +441,34 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
}
fl.oif = sk->bound_dev_if;
+ fl.fl6_src = NULL;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
- err = datagram_send_ctl(msg, &fl.oif, &saddr, opt, &hlimit);
- if (err < 0)
+ err = datagram_send_ctl(msg, &fl, opt, &hlimit);
+ if (err < 0) {
+ fl6_sock_release(flowlabel);
return err;
+ }
+ if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ }
+ if (!(opt->opt_nflen|opt->opt_flen))
+ opt = NULL;
}
- if (opt == NULL || !(opt->opt_nflen|opt->opt_flen))
+ if (opt == NULL)
opt = np->opt;
+ if (flowlabel)
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
raw_opt = &sk->tp_pinfo.tp_raw;
fl.proto = proto;
- fl.nl_u.ip6_u.daddr = daddr;
- fl.nl_u.ip6_u.saddr = saddr;
+ fl.fl6_dst = daddr;
fl.uli_u.icmpt.type = 0;
fl.uli_u.icmpt.code = 0;
@@ -463,6 +493,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
opt, hlimit, msg->msg_flags);
}
+ fl6_sock_release(flowlabel);
+
return err<0?err:len;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9ae8f63d7..04b49d843 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: route.c,v 1.34 1998/10/03 09:38:43 davem Exp $
+ * $Id: route.c,v 1.35 1999/03/21 05:22:57 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -71,6 +71,7 @@ int ip6_rt_gc_min_interval = 5*HZ;
int ip6_rt_gc_timeout = 60*HZ;
int ip6_rt_gc_interval = 30*HZ;
int ip6_rt_gc_elasticity = 9;
+int ip6_rt_mtu_expires = 10*60*HZ;
static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
@@ -97,7 +98,7 @@ struct dst_ops ip6_dst_ops = {
struct rt6_info ip6_null_entry = {
{{NULL, ATOMIC_INIT(1), ATOMIC_INIT(1), &loopback_dev,
- -1, 0, 0, 0, 0, 0, 0, 0,
+ -1, 0, 0, 0, 0, 0, 0, 0, 0,
-ENETUNREACH, NULL, NULL,
ip6_pkt_discard, ip6_pkt_discard,
#ifdef CONFIG_NET_CLS_ROUTE
@@ -105,7 +106,7 @@ struct rt6_info ip6_null_entry = {
#endif
&ip6_dst_ops}},
NULL, {{{0}}}, RTF_REJECT|RTF_NONEXTHOP, ~0U,
- 255, 0, ATOMIC_INIT(1), {NULL}, {{{{0}}}, 0}, {{{{0}}}, 0}
+ 255, ATOMIC_INIT(1), {NULL}, {{{{0}}}, 0}, {{{{0}}}, 0}
};
struct fib6_node ip6_routing_table = {
@@ -515,13 +516,30 @@ static struct dst_entry *ip6_dst_reroute(struct dst_entry *dst, struct sk_buff *
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
- dst_release(dst);
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+ if (rt) {
+ if (rt->rt6i_flags & RTF_CACHE)
+ ip6_del_rt(rt);
+ dst_release(dst);
+ }
return NULL;
}
static void ip6_link_failure(struct sk_buff *skb)
{
+ struct rt6_info *rt;
+
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
+
+ rt = (struct rt6_info *) skb->dst;
+ if (rt) {
+ if (rt->rt6i_flags&RTF_CACHE) {
+ dst_set_expires(&rt->u.dst, 0);
+ rt->rt6i_flags |= RTF_EXPIRES;
+ } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
+ rt->rt6i_node->fn_sernum = -1;
+ }
}
static int ip6_dst_gc()
@@ -1009,12 +1027,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
when cache entry will expire old pmtu
would return automatically.
*/
- if (rt->rt6i_dst.plen == 128) {
- /*
- * host route
- */
+ if (rt->rt6i_flags & RTF_CACHE) {
rt->u.dst.pmtu = pmtu;
- rt->rt6i_flags |= RTF_MODIFIED;
+ dst_set_expires(&rt->u.dst, ip6_rt_mtu_expires);
+ rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
goto out;
}
@@ -1025,9 +1041,12 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
*/
if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) {
nrt = rt6_cow(rt, daddr, saddr);
- nrt->u.dst.pmtu = pmtu;
- nrt->rt6i_flags |= RTF_DYNAMIC;
- dst_release(&nrt->u.dst);
+ if (!nrt->u.dst.error) {
+ nrt->u.dst.pmtu = pmtu;
+ dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
+ nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
+ dst_release(&nrt->u.dst);
+ }
} else {
nrt = ip6_rt_copy(rt);
if (nrt == NULL)
@@ -1035,7 +1054,8 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
ipv6_addr_copy(&nrt->rt6i_dst.addr, daddr);
nrt->rt6i_dst.plen = 128;
nrt->rt6i_nexthop = neigh_clone(rt->rt6i_nexthop);
- nrt->rt6i_flags |= (RTF_DYNAMIC | RTF_CACHE);
+ dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
+ nrt->rt6i_flags |= RTF_DYNAMIC|RTF_CACHE|RTF_EXPIRES;
nrt->u.dst.pmtu = pmtu;
rt6_ins(nrt);
}
@@ -1069,7 +1089,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
- rt->rt6i_metric = ort->rt6i_metric;
+ rt->rt6i_metric = 0;
memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
@@ -1521,9 +1541,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
if (iif)
RTA_PUT(skb, RTA_IIF, 4, &iif);
else if (dst) {
- struct inet6_ifaddr *ifp = ipv6_get_saddr(&rt->u.dst, dst);
- if (ifp)
- RTA_PUT(skb, RTA_PREFSRC, 16, &ifp->addr);
+ struct in6_addr saddr_buf;
+ if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
+ RTA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
mx = (struct rtattr*)skb->tail;
RTA_PUT(skb, RTA_METRICS, 0, NULL);
@@ -1722,7 +1742,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt)
struct sk_buff *skb;
int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
- skb = alloc_skb(size, GFP_ATOMIC);
+ skb = alloc_skb(size, gfp_any());
if (!skb) {
netlink_set_err(rtnl, 0, RTMGRP_IPV6_ROUTE, ENOBUFS);
return;
@@ -1733,7 +1753,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt)
return;
}
NETLINK_CB(skb).dst_groups = RTMGRP_IPV6_ROUTE;
- netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_ROUTE, GFP_ATOMIC);
+ netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_ROUTE, gfp_any());
}
#endif
@@ -1916,6 +1936,9 @@ ctl_table ipv6_route_table[] = {
{NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity",
&ip6_rt_gc_elasticity, sizeof(int), 0644, NULL,
&proc_dointvec_jiffies},
+ {NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires",
+ &ip6_rt_mtu_expires, sizeof(int), 0644, NULL,
+ &proc_dointvec_jiffies},
{0}
};
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 850553d9d..a1d888c98 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -6,7 +6,7 @@
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
- * $Id: sit.c,v 1.29 1998/10/03 09:38:47 davem Exp $
+ * $Id: sit.c,v 1.31 1999/03/25 10:04:55 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -94,6 +94,46 @@ static struct ip_tunnel * ipip6_tunnel_lookup(u32 remote, u32 local)
return NULL;
}
+static struct ip_tunnel ** ipip6_bucket(struct ip_tunnel *t)
+{
+ u32 remote = t->parms.iph.daddr;
+ u32 local = t->parms.iph.saddr;
+ unsigned h = 0;
+ int prio = 0;
+
+ if (remote) {
+ prio |= 2;
+ h ^= HASH(remote);
+ }
+ if (local) {
+ prio |= 1;
+ h ^= HASH(local);
+ }
+ return &tunnels[prio][h];
+}
+
+static void ipip6_tunnel_unlink(struct ip_tunnel *t)
+{
+ struct ip_tunnel **tp;
+
+ for (tp = ipip6_bucket(t); *tp; tp = &(*tp)->next) {
+ if (t == *tp) {
+ *tp = t->next;
+ synchronize_bh();
+ break;
+ }
+ }
+}
+
+static void ipip6_tunnel_link(struct ip_tunnel *t)
+{
+ struct ip_tunnel **tp = ipip6_bucket(t);
+
+ t->next = *tp;
+ wmb();
+ *tp = t;
+}
+
struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create)
{
u32 remote = parms->iph.daddr;
@@ -145,10 +185,7 @@ struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create)
if (register_netdevice(dev) < 0)
goto failed;
- start_bh_atomic();
- nt->next = t;
- *tp = nt;
- end_bh_atomic();
+ ipip6_tunnel_link(nt);
/* Do not decrement MOD_USE_COUNT here. */
return nt;
@@ -160,37 +197,17 @@ failed:
static void ipip6_tunnel_destroy(struct device *dev)
{
- struct ip_tunnel *t, **tp;
- struct ip_tunnel *t0 = (struct ip_tunnel*)dev->priv;
- u32 remote = t0->parms.iph.daddr;
- u32 local = t0->parms.iph.saddr;
- unsigned h = 0;
- int prio = 0;
-
if (dev == &ipip6_fb_tunnel_dev) {
tunnels_wc[0] = NULL;
+ synchronize_bh();
return;
- }
-
- if (remote) {
- prio |= 2;
- h ^= HASH(remote);
- }
- if (local) {
- prio |= 1;
- h ^= HASH(local);
- }
- for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
- if (t == t0) {
- *tp = t->next;
- kfree(dev);
- MOD_DEC_USE_COUNT;
- break;
- }
+ } else {
+ ipip6_tunnel_unlink((struct ip_tunnel*)dev->priv);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
}
}
-
void ipip6_err(struct sk_buff *skb, unsigned char *dp, int len)
{
#ifndef I_WISH_WORLD_WERE_PERFECT
@@ -571,6 +588,32 @@ ipip6_tunnel_ioctl (struct device *dev, struct ifreq *ifr, int cmd)
t = ipip6_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
+ if (dev != &ipip6_fb_tunnel_dev && cmd == SIOCCHGTUNNEL &&
+ t != &ipip6_fb_tunnel) {
+ if (t != NULL) {
+ if (t->dev != dev) {
+ err = -EEXIST;
+ break;
+ }
+ } else {
+ if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
+ (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
+ err = -EINVAL;
+ break;
+ }
+ t = (struct ip_tunnel*)dev->priv;
+ start_bh_atomic();
+ ipip6_tunnel_unlink(t);
+ t->parms.iph.saddr = p.iph.saddr;
+ t->parms.iph.daddr = p.iph.daddr;
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ ipip6_tunnel_link(t);
+ end_bh_atomic();
+ netdev_state_change(dev);
+ }
+ }
+
if (t) {
err = 0;
if (cmd == SIOCCHGTUNNEL) {
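
The new SIOCCHGTUNNEL branch above lets the endpoints of an existing sit tunnel be changed in place: the device is unlinked from its hash bucket, the addresses are rewritten, and it is relinked under bh protection before netdev_state_change() is signalled. A hedged user-space sketch of driving it; struct ip_tunnel_parm, SIOCCHGTUNNEL and the header arrangement are assumed from <linux/if_tunnel.h>, and "sit1" plus the addresses are placeholders.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tunnel.h>	/* struct ip_tunnel_parm, SIOCCHGTUNNEL (assumed) */

int main(void)
{
	struct ip_tunnel_parm p;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&p, 0, sizeof(p));
	p.iph.version  = 4;
	p.iph.ihl      = 5;
	p.iph.protocol = IPPROTO_IPV6;		/* IPv6-in-IPv4, as sit expects */
	p.iph.saddr    = inet_addr("192.0.2.1");	/* placeholder local  */
	p.iph.daddr    = inet_addr("192.0.2.2");	/* placeholder remote */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "sit1", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&p;

	if (fd < 0 || ioctl(fd, SIOCCHGTUNNEL, &ifr) < 0) {
		perror("SIOCCHGTUNNEL");
		return 1;
	}
	return 0;
}
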
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a95698db5..f1ef74de8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -5,7 +5,7 @@
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: tcp_ipv6.c,v 1.94 1998/11/07 11:50:33 davem Exp $
+ * $Id: tcp_ipv6.c,v 1.104 1999/04/24 00:27:25 davem Exp $
*
* Based on:
* linux/net/ipv4/tcp.c
@@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -376,12 +377,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
- struct inet6_ifaddr *ifa;
struct in6_addr *saddr = NULL;
+ struct in6_addr saddr_buf;
struct flowi fl;
struct dst_entry *dst;
struct sk_buff *buff;
int addr_type;
+ int err;
if (sk->state != TCP_CLOSE)
return(-EISCONN);
@@ -399,6 +401,19 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (usin->sin6_family && usin->sin6_family != AF_INET6)
return(-EAFNOSUPPORT);
+ fl.fl6_flowlabel = 0;
+ if (np->sndflow) {
+ fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+ if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+ struct ip6_flowlabel *flowlabel;
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ fl6_sock_release(flowlabel);
+ }
+ }
+
/*
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
@@ -420,6 +435,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return (-EINVAL);
memcpy(&np->daddr, &usin->sin6_addr, sizeof(struct in6_addr));
+ np->flow_label = fl.fl6_flowlabel;
/*
* TCP over IPv4
@@ -428,7 +444,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (addr_type == IPV6_ADDR_MAPPED) {
u32 exthdrlen = tp->ext_header_len;
struct sockaddr_in sin;
- int err;
SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
@@ -445,6 +460,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tp->ext_header_len = exthdrlen;
sk->tp_pinfo.af_tcp.af_specific = &ipv6_specific;
sk->backlog_rcv = tcp_v6_do_rcv;
+ goto failure;
} else {
ipv6_addr_set(&np->saddr, 0, 0, __constant_htonl(0x0000FFFF),
sk->saddr);
@@ -459,8 +475,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
saddr = &np->rcv_saddr;
fl.proto = IPPROTO_TCP;
- fl.nl_u.ip6_u.daddr = &np->daddr;
- fl.nl_u.ip6_u.saddr = saddr;
+ fl.fl6_dst = &np->daddr;
+ fl.fl6_src = saddr;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.dport = usin->sin6_port;
fl.uli_u.ports.sport = sk->sport;
@@ -472,9 +488,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
dst = ip6_route_output(sk, &fl);
- if (dst->error) {
+ if ((err = dst->error) != 0) {
dst_release(dst);
- return dst->error;
+ goto failure;
}
if (fl.oif == 0 && addr_type&IPV6_ADDR_LINKLOCAL) {
@@ -489,35 +505,36 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
ip6_dst_store(sk, dst, NULL);
if (saddr == NULL) {
- ifa = ipv6_get_saddr(dst, &np->daddr);
-
- if (ifa == NULL)
- return -ENETUNREACH;
-
- saddr = &ifa->addr;
+ err = ipv6_get_saddr(dst, &np->daddr, &saddr_buf);
+ if (err)
+ goto failure;
- /* set the source address */
- ipv6_addr_copy(&np->rcv_saddr, saddr);
- ipv6_addr_copy(&np->saddr, saddr);
+ saddr = &saddr_buf;
}
+ /* set the source address */
+ ipv6_addr_copy(&np->rcv_saddr, saddr);
+ ipv6_addr_copy(&np->saddr, saddr);
+
tp->ext_header_len = 0;
if (np->opt)
tp->ext_header_len = np->opt->opt_flen+np->opt->opt_nflen;
/* Reset mss clamp */
tp->mss_clamp = ~0;
+ err = -ENOBUFS;
buff = sock_wmalloc(sk, (MAX_HEADER + sk->prot->max_header),
0, GFP_KERNEL);
if (buff == NULL)
- return -ENOBUFS;
+ goto failure;
sk->dport = usin->sin6_port;
if (!tcp_v6_unique_address(sk)) {
kfree_skb(buff);
- return -EADDRNOTAVAIL;
+ err = -EADDRNOTAVAIL;
+ goto failure;
}
/*
@@ -531,11 +548,16 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tcp_connect(sk, buff, dst->pmtu);
return 0;
+
+failure:
+ dst_release(xchg(&sk->dst_cache, NULL));
+ memcpy(&np->daddr, 0, sizeof(struct in6_addr));
+ sk->daddr = 0;
+ return err;
}
static int tcp_v6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
- struct tcp_opt *tp;
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
int retval = -EINVAL;
@@ -562,16 +584,11 @@ static int tcp_v6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
goto out;
if (ipv6_addr_cmp(&addr->sin6_addr, &np->daddr))
goto out;
+ if (np->sndflow && np->flow_label != (addr->sin6_flowinfo&IPV6_FLOWINFO_MASK))
+ goto out;
}
- lock_sock(sk);
- retval = tcp_do_sendmsg(sk, msg->msg_iovlen, msg->msg_iov,
- msg->msg_flags);
- /* Push out partial tail frames if needed. */
- tp = &(sk->tp_pinfo.af_tcp);
- if(tp->send_head && tcp_snd_test(sk, tp->send_head))
- tcp_write_xmit(sk);
- release_sock(sk);
+ retval = tcp_do_sendmsg(sk, msg);
out:
return retval;
@@ -610,11 +627,14 @@ void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
np = &sk->net_pinfo.af_inet6;
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
- /* icmp should have updated the destination cache entry */
+
+ if (atomic_read(&sk->sock_readers))
+ return;
if (sk->state == TCP_LISTEN)
return;
+ /* icmp should have updated the destination cache entry */
if (sk->dst_cache)
dst = dst_check(&sk->dst_cache, np->dst_cookie);
@@ -639,12 +659,9 @@ void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
if (dst->error) {
sk->err_soft = -dst->error;
- } else if (tp->pmtu_cookie > dst->pmtu
- && !atomic_read(&sk->sock_readers)) {
- lock_sock(sk);
+ } else if (tp->pmtu_cookie > dst->pmtu) {
tcp_sync_mss(sk, dst->pmtu);
tcp_simple_retransmit(sk);
- release_sock(sk);
} /* else let the usual retransmit timer handle it */
dst_release(dst);
return;
@@ -720,6 +737,7 @@ static void tcp_v6_send_synack(struct sock *sk, struct open_request *req)
fl.proto = IPPROTO_TCP;
fl.nl_u.ip6_u.daddr = &req->af.v6_req.rmt_addr;
fl.nl_u.ip6_u.saddr = &req->af.v6_req.loc_addr;
+ fl.fl6_flowlabel = 0;
fl.oif = req->af.v6_req.iif;
fl.uli_u.ports.dport = req->rmt_port;
fl.uli_u.ports.sport = sk->sport;
@@ -783,6 +801,8 @@ static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
if (sk->net_pinfo.af_inet6.rxopt.all) {
if ((opt->hop && sk->net_pinfo.af_inet6.rxopt.bits.hopopts) ||
+ ((IPV6_FLOWINFO_MASK&*(u32*)skb->nh.raw) &&
+ sk->net_pinfo.af_inet6.rxopt.bits.rxflow) ||
(opt->srcrt && sk->net_pinfo.af_inet6.rxopt.bits.srcrt) ||
((opt->dst1 || opt->dst0) && sk->net_pinfo.af_inet6.rxopt.bits.dstopts))
return 1;
@@ -961,6 +981,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
fl.nl_u.ip6_u.daddr = rt0->addr;
}
fl.nl_u.ip6_u.saddr = &req->af.v6_req.loc_addr;
+ fl.fl6_flowlabel = 0;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.dport = req->rmt_port;
fl.uli_u.ports.sport = sk->sport;
@@ -1086,6 +1107,7 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
fl.nl_u.ip6_u.daddr = &skb->nh.ipv6h->saddr;
fl.nl_u.ip6_u.saddr = &skb->nh.ipv6h->daddr;
+ fl.fl6_flowlabel = 0;
t1->check = csum_ipv6_magic(fl.nl_u.ip6_u.saddr,
fl.nl_u.ip6_u.daddr,
@@ -1190,6 +1212,9 @@ static inline struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
+#ifdef CONFIG_FILTER
+ struct sk_filter *filter;
+#endif
int users = 0;
/* Imagine: socket is IPv6. IPv4 packet arrives,
@@ -1203,6 +1228,12 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->protocol == __constant_htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
+#ifdef CONFIG_FILTER
+ filter = sk->filter;
+ if (filter && sk_filter(skb, filter))
+ goto discard;
+#endif /* CONFIG_FILTER */
+
/*
* socket locking is here for SMP purposes as backlog rcv
* is currently called with bh processing disabled.
@@ -1210,11 +1241,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
ipv6_statistics.Ip6InDelivers++;
- /* XXX We need to think more about socket locking
- * XXX wrt. backlog queues, __release_sock(), etc. -DaveM
- */
- lock_sock(sk);
-
/*
* This doesn't check if the socket has enough room for the packet.
* Either process the packet _without_ queueing it and then free it,
@@ -1245,7 +1271,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
goto reset;
if (users)
goto ipv6_pktoptions;
- release_sock(sk);
return 0;
}
@@ -1255,8 +1280,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
nsk = tcp_v6_hnd_req(sk, skb);
if (!nsk)
goto discard;
- lock_sock(nsk);
- release_sock(sk);
+
+ /*
+ * Queue it on the new socket if the new socket is active,
+ * otherwise we just shortcircuit this and continue with
+ * the new socket..
+ */
+ if (atomic_read(&nsk->sock_readers)) {
+ skb_orphan(skb);
+ __skb_queue_tail(&nsk->back_log, skb);
+ return 0;
+ }
sk = nsk;
}
@@ -1264,7 +1298,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
goto reset;
if (users)
goto ipv6_pktoptions;
- release_sock(sk);
return 0;
reset:
@@ -1273,7 +1306,6 @@ discard:
if (users)
kfree_skb(skb);
kfree_skb(skb);
- release_sock(sk);
return 0;
ipv6_pktoptions:
@@ -1303,7 +1335,6 @@ ipv6_pktoptions:
if (skb)
kfree_skb(skb);
- release_sock(sk);
return 0;
}
@@ -1404,6 +1435,7 @@ static int tcp_v6_rebuild_header(struct sock *sk)
fl.proto = IPPROTO_TCP;
fl.nl_u.ip6_u.daddr = &np->daddr;
fl.nl_u.ip6_u.saddr = &np->saddr;
+ fl.fl6_flowlabel = np->flow_label;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.dport = sk->dport;
fl.uli_u.ports.sport = sk->sport;
@@ -1432,6 +1464,9 @@ static struct sock * tcp_v6_get_sock(struct sk_buff *skb, struct tcphdr *th)
struct in6_addr *saddr;
struct in6_addr *daddr;
+ if (skb->protocol == __constant_htons(ETH_P_IP))
+ return ipv4_specific.get_sock(skb, th);
+
saddr = &skb->nh.ipv6h->saddr;
daddr = &skb->nh.ipv6h->daddr;
return tcp_v6_lookup(saddr, th->source, daddr, th->dest, tcp_v6_iif(skb));
@@ -1445,8 +1480,9 @@ static void tcp_v6_xmit(struct sk_buff *skb)
struct dst_entry *dst = sk->dst_cache;
fl.proto = IPPROTO_TCP;
- fl.nl_u.ip6_u.daddr = &np->daddr;
- fl.nl_u.ip6_u.saddr = &np->saddr;
+ fl.fl6_dst = &np->daddr;
+ fl.fl6_src = &np->saddr;
+ fl.fl6_flowlabel = np->flow_label;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.sport = sk->sport;
fl.uli_u.ports.dport = sk->dport;
@@ -1487,6 +1523,8 @@ static void v6_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, &np->daddr, sizeof(struct in6_addr));
sin6->sin6_port = sk->dport;
+ /* We do not store received flowlabel for TCP */
+ sin6->sin6_flowinfo = 0;
}
static struct tcp_func ipv6_specific = {
@@ -1537,10 +1575,16 @@ static int tcp_v6_init_sock(struct sock *sk)
tp->mdev = TCP_TIMEOUT_INIT;
tp->mss_clamp = ~0;
+ /* So many TCP implementations out there (incorrectly) count the
+ * initial SYN frame in their delayed-ACK and congestion control
+ * algorithms that we must have the following bandaid to talk
+ * efficiently to them. -DaveM
+ */
+ tp->snd_cwnd = 2;
+
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
- tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
tp->snd_ssthresh = 0x7fffffff;
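
The tcp_v6_connect() hunks above make the socket honour IPv6 flow labels: when np->sndflow is set, the label carried in sin6_flowinfo must already be attached to the socket (fl6_sock_lookup() returns -EINVAL otherwise), and the label's stored destination overrides the address in the sockaddr. A minimal userspace sketch of driving that path follows; it assumes the flow-label manager socket options (IPV6_FLOWLABEL_MGR, IPV6_FLOWINFO_SEND) and the in6_flowlabel_req layout exported by the flow-label work in this merge, so the option names and fields are assumptions, not quoted from this diff.

/* Hypothetical illustration only: attach a flow label to a TCP socket,
 * then connect with it so the new np->sndflow path is exercised. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/in6.h>          /* in6_flowlabel_req, IPV6_FL_* (assumed) */

int connect_with_label(const struct in6_addr *dst, unsigned short port,
                       unsigned int label /* low 20 bits */)
{
        struct in6_flowlabel_req req;
        struct sockaddr_in6 sin6;
        int on = 1;
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;

        /* Ask the flow-label manager to create/attach the label for dst. */
        memset(&req, 0, sizeof(req));
        req.flr_label  = htonl(label & 0x000fffff);
        req.flr_action = IPV6_FL_A_GET;
        req.flr_flags  = IPV6_FL_F_CREATE;
        req.flr_share  = IPV6_FL_S_EXCL;
        memcpy(&req.flr_dst, dst, sizeof(*dst));
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &req, sizeof(req)) < 0)
                return -1;

        /* Tell the stack to honour sin6_flowinfo (sets np->sndflow). */
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWINFO_SEND, &on, sizeof(on)) < 0)
                return -1;

        memset(&sin6, 0, sizeof(sin6));
        sin6.sin6_family   = AF_INET6;
        sin6.sin6_port     = htons(port);
        sin6.sin6_flowinfo = htonl(label & 0x000fffff);
        sin6.sin6_addr     = *dst;

        return connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
}
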
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 0670e8758..5b4d55f9e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -7,7 +7,7 @@
*
* Based on linux/ipv4/udp.c
*
- * $Id: udp.c,v 1.37 1998/11/08 11:17:10 davem Exp $
+ * $Id: udp.c,v 1.40 1999/05/08 20:00:32 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -201,9 +201,10 @@ int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct in6_addr *daddr;
+ struct in6_addr saddr;
struct dst_entry *dst;
- struct inet6_ifaddr *ifa;
struct flowi fl;
+ struct ip6_flowlabel *flowlabel = NULL;
int addr_type;
int err;
@@ -218,6 +219,17 @@ int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (usin->sin6_family && usin->sin6_family != AF_INET6)
return(-EAFNOSUPPORT);
+ fl.fl6_flowlabel = 0;
+ if (np->sndflow) {
+ fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+ if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ }
+ }
+
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type == IPV6_ADDR_ANY) {
@@ -262,6 +274,7 @@ ipv4_connected:
}
ipv6_addr_copy(&np->daddr, daddr);
+ np->flow_label = fl.fl6_flowlabel;
sk->dport = usin->sin6_port;
@@ -271,41 +284,49 @@ ipv4_connected:
*/
fl.proto = IPPROTO_UDP;
- fl.nl_u.ip6_u.daddr = &np->daddr;
- fl.nl_u.ip6_u.saddr = NULL;
+ fl.fl6_dst = &np->daddr;
+ fl.fl6_src = &saddr;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.dport = sk->dport;
fl.uli_u.ports.sport = sk->sport;
- if (np->opt && np->opt->srcrt) {
+ if (flowlabel) {
+ if (flowlabel->opt && flowlabel->opt->srcrt) {
+ struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
+ fl.fl6_dst = rt0->addr;
+ }
+ } else if (np->opt && np->opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
- fl.nl_u.ip6_u.daddr = rt0->addr;
+ fl.fl6_dst = rt0->addr;
}
dst = ip6_route_output(sk, &fl);
- if (dst->error) {
+ if ((err = dst->error) != 0) {
dst_release(dst);
- return dst->error;
+ fl6_sock_release(flowlabel);
+ return err;
}
- ip6_dst_store(sk, dst, fl.nl_u.ip6_u.daddr);
+ ip6_dst_store(sk, dst, fl.fl6_dst);
/* get the source adddress used in the apropriate device */
- ifa = ipv6_get_saddr(dst, daddr);
+ err = ipv6_get_saddr(dst, daddr, &saddr);
- if(ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&np->saddr, &ifa->addr);
+ if (err == 0) {
+ if(ipv6_addr_any(&np->saddr))
+ ipv6_addr_copy(&np->saddr, &saddr);
- if(ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_copy(&np->rcv_saddr, &ifa->addr);
- sk->rcv_saddr = 0xffffffff;
+ if(ipv6_addr_any(&np->rcv_saddr)) {
+ ipv6_addr_copy(&np->rcv_saddr, &saddr);
+ sk->rcv_saddr = 0xffffffff;
+ }
+ sk->state = TCP_ESTABLISHED;
}
+ fl6_sock_release(flowlabel);
- sk->state = TCP_ESTABLISHED;
-
- return(0);
+ return err;
}
static void udpv6_close(struct sock *sk, long timeout)
@@ -317,7 +338,7 @@ static void udpv6_close(struct sock *sk, long timeout)
destroy_sock(sk);
}
-#if defined(CONFIG_FILTER) || !defined(HAVE_CSUM_COPY_USER)
+#ifndef HAVE_CSUM_COPY_USER
#undef CONFIG_UDP_DELAY_CSUM
#endif
@@ -352,15 +373,15 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov, copied);
#else
- if (sk->no_check || skb->ip_summed==CHECKSUM_UNNECESSARY) {
+ if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
} else if (copied > msg->msg_iov[0].iov_len || (msg->msg_flags&MSG_TRUNC)) {
- if (csum_fold(csum_partial(skb->h.raw, ntohs(skb->h.uh->len), skb->csum))) {
+ if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
- err = (msg->msg_flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
+ err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
udp_stats_in6.UdpInErrors++;
goto out_free;
}
@@ -373,11 +394,11 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
csum = csum_and_copy_to_user((char*)&skb->h.uh[1], msg->msg_iov[0].iov_base, copied, csum, &err);
if (err)
goto out_free;
- if (csum_fold(csum)) {
+ if ((unsigned short)csum_fold(csum)) {
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
- err = (msg->msg_flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
+ err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
udp_stats_in6.UdpInErrors++;
goto out_free;
}
@@ -395,6 +416,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
sin6 = (struct sockaddr_in6 *) msg->msg_name;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = skb->h.uh->source;
+ sin6->sin6_flowinfo = 0;
if (skb->protocol == __constant_htons(ETH_P_IP)) {
ipv6_addr_set(&sin6->sin6_addr, 0, 0,
@@ -454,6 +476,17 @@ void udpv6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
+#if defined(CONFIG_FILTER) && defined(CONFIG_UDP_DELAY_CSUM)
+ if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+ if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
+ udp_stats_in6.UdpInErrors++;
+ ipv6_statistics.Ip6InDiscards++;
+ kfree_skb(skb);
+ return 0;
+ }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+#endif
if (sock_queue_rcv_skb(sk,skb)<0) {
udp_stats_in6.UdpInErrors++;
ipv6_statistics.Ip6InDiscards++;
@@ -627,14 +660,13 @@ int udpv6_rcv(struct sk_buff *skb, unsigned long len)
if (sk == NULL) {
#ifdef CONFIG_UDP_DELAY_CSUM
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
- csum_fold(csum_partial((char*)uh, len, skb->csum)))
+ (unsigned short)csum_fold(csum_partial((char*)uh, len, skb->csum)))
goto discard;
#endif
-
udp_stats_in6.UdpNoPorts++;
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
-
+
kfree_skb(skb);
return(0);
}
@@ -723,10 +755,10 @@ static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct ipv6_txoptions *opt = NULL;
+ struct ip6_flowlabel *flowlabel = NULL;
struct flowi fl;
int addr_len = msg->msg_namelen;
struct in6_addr *daddr;
- struct in6_addr *saddr = NULL;
int len = ulen + sizeof(struct udphdr);
int addr_type;
int hlimit = -1;
@@ -741,23 +773,35 @@ static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
if (msg->msg_flags & ~(MSG_DONTROUTE|MSG_DONTWAIT))
return(-EINVAL);
-
+
+ fl.fl6_flowlabel = 0;
+
if (sin6) {
if (sin6->sin6_family == AF_INET)
return udp_sendmsg(sk, msg, ulen);
if (addr_len < sizeof(*sin6))
return(-EINVAL);
-
+
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
return(-EINVAL);
if (sin6->sin6_port == 0)
return(-EINVAL);
-
+
udh.uh.dest = sin6->sin6_port;
daddr = &sin6->sin6_addr;
+ if (np->sndflow) {
+ fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
+ if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ daddr = &flowlabel->dst;
+ }
+ }
+
/* Otherwise it will be difficult to maintain sk->dst_cache. */
if (sk->state == TCP_ESTABLISHED &&
!ipv6_addr_cmp(daddr, &sk->net_pinfo.af_inet6.daddr))
@@ -765,38 +809,52 @@ static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
} else {
if (sk->state != TCP_ESTABLISHED)
return(-ENOTCONN);
-
+
udh.uh.dest = sk->dport;
daddr = &sk->net_pinfo.af_inet6.daddr;
+ fl.fl6_flowlabel = np->flow_label;
}
-
+
addr_type = ipv6_addr_type(daddr);
-
+
if (addr_type == IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
-
+
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
sin.sin_port = udh.uh.dest;
msg->msg_name = (struct sockaddr *)(&sin);
msg->msg_namelen = sizeof(sin);
+ fl6_sock_release(flowlabel);
return udp_sendmsg(sk, msg, ulen);
}
-
+
udh.daddr = NULL;
fl.oif = sk->bound_dev_if;
-
+ fl.fl6_src = NULL;
+
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
- err = datagram_send_ctl(msg, &fl.oif, &saddr, opt, &hlimit);
- if (err < 0)
+ err = datagram_send_ctl(msg, &fl, opt, &hlimit);
+ if (err < 0) {
+ fl6_sock_release(flowlabel);
return err;
+ }
+ if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
+ flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
+ if (flowlabel == NULL)
+ return -EINVAL;
+ }
+ if (!(opt->opt_nflen|opt->opt_flen))
+ opt = NULL;
}
- if (opt == NULL || !(opt->opt_nflen|opt->opt_flen))
+ if (opt == NULL)
opt = np->opt;
+ if (flowlabel)
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
if (opt && opt->srcrt)
udh.daddr = daddr;
@@ -808,14 +866,15 @@ static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
udh.pl_len = len;
fl.proto = IPPROTO_UDP;
- fl.nl_u.ip6_u.daddr = daddr;
- fl.nl_u.ip6_u.saddr = saddr;
+ fl.fl6_dst = daddr;
fl.uli_u.ports.dport = udh.uh.dest;
fl.uli_u.ports.sport = udh.uh.source;
err = ip6_build_xmit(sk, udpv6_getfrag, &udh, &fl, len, opt, hlimit,
msg->msg_flags);
+ fl6_sock_release(flowlabel);
+
if (err < 0)
return err;
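
The udpv6_recvmsg() and udpv6_queue_rcv_skb() changes above cast csum_fold() to unsigned short before testing it, and verify a delayed checksum before a socket filter runs, so the filter never inspects a corrupt datagram. A short sketch of what the fold-and-test amounts to, assuming the usual one's-complement fold; this is not the kernel's per-architecture csum_fold().

/* Sketch of the fold-and-test idiom, under the assumption of ordinary
 * one's-complement arithmetic.  csum_fold() may return a wider type on
 * some architectures, which is why the callers above cast the result. */
static unsigned short fold_csum(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);   /* fold carries into the low word */
        sum = (sum & 0xffff) + (sum >> 16);   /* a second pass catches the last carry */
        return (unsigned short)~sum;          /* 0 means the checksum verified */
}

A datagram whose folded sum (pseudo-header included) is non-zero is counted in UdpInErrors and dropped, exactly as in the hunks above.
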
diff --git a/net/ipx/Makefile b/net/ipx/Makefile
index 39639c6dc..fb55995ae 100644
--- a/net/ipx/Makefile
+++ b/net/ipx/Makefile
@@ -10,8 +10,12 @@
# We only get in/to here if CONFIG_IPX = 'y' or 'm'
O_TARGET := ipx.o
-M_OBJS := $(O_TARGET)
-OX_OBJS += af_ipx.o
+M_OBJS :=
+OX_OBJS := af_ipx.o
+
+ifeq ($(CONFIG_IPX),m)
+ M_OBJS += $(O_TARGET)
+endif
ifeq ($(CONFIG_SYSCTL),y)
O_OBJS += sysctl_net_ipx.o
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 8ff0fe317..929278b68 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -766,11 +766,10 @@ static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb)
i = 0;
- /* Dump packet if too many hops or already seen this net */
- if(ipx->ipx_tctrl < 8)
- for( ; i < ipx->ipx_tctrl; i++)
- if(*l++ == intrfc->if_netnum)
- break;
+ /* Dump packet if already seen this net */
+ for( ; i < ipx->ipx_tctrl; i++)
+ if(*l++ == intrfc->if_netnum)
+ break;
if(i == ipx->ipx_tctrl)
{
@@ -779,6 +778,10 @@ static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb)
/* xmit on all other interfaces... */
for(ifcs = ipx_interfaces; ifcs != NULL; ifcs = ifcs->if_next)
{
+ /* Except unconfigured interfaces */
+ if(ifcs->if_netnum == 0)
+ continue;
+
/* That aren't in the list */
l = (__u32 *) c;
for(i = 0; i <= ipx->ipx_tctrl; i++)
@@ -2074,18 +2077,16 @@ int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
/* Too small? */
if(ntohs(ipx->ipx_pktsize) < sizeof(struct ipxhdr))
- {
- kfree_skb(skb);
- return (0);
- }
-
+ goto drop;
+
+ /* Not ours */
+ if (skb->pkt_type == PACKET_OTHERHOST)
+ goto drop;
+
if(ipx->ipx_checksum != IPX_NO_CHECKSUM)
{
if(ipx_set_checksum(ipx, ntohs(ipx->ipx_pktsize)) != ipx->ipx_checksum)
- {
- kfree_skb(skb);
- return (0);
- }
+ goto drop;
}
/* Determine what local ipx endpoint this is */
@@ -2099,13 +2100,14 @@ int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
}
if(intrfc == NULL) /* Not one of ours */
- {
- kfree_skb(skb);
- return (0);
- }
+ goto drop;
}
return (ipxitf_rcv(intrfc, skb));
+
+drop:
+ kfree_skb(skb);
+ return (0);
}
static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, int len,
@@ -2133,8 +2135,11 @@ static int ipx_sendmsg(struct socket *sock, struct msghdr *msg, int len,
uaddr.sipx_port = 0;
uaddr.sipx_network = 0L;
#ifdef CONFIG_IPX_INTERN
- memcpy(uaddr.sipx_node, sk->protinfo.af_ipx.intrfc
- ->if_node, IPX_NODE_LEN);
+ if(sk->protinfo.af_ipx.intrfc)
+ memcpy(uaddr.sipx_node, sk->protinfo.af_ipx.intrfc
+ ->if_node,IPX_NODE_LEN);
+ else
+ return -ENETDOWN; /* Someone zonked the iface */
#endif
ret = ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
diff --git a/net/irda/Config.in b/net/irda/Config.in
index f73a05766..8912d6cb9 100644
--- a/net/irda/Config.in
+++ b/net/irda/Config.in
@@ -12,7 +12,6 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
if [ "$CONFIG_IRDA" != "n" ] ; then
comment 'IrDA protocols'
source net/irda/irlan/Config.in
- source net/irda/irobex/Config.in
source net/irda/ircomm/Config.in
source net/irda/irlpt/Config.in
@@ -21,7 +20,6 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
comment ' IrDA options'
bool ' Cache last LSAP' CONFIG_IRDA_CACHE_LAST_LSAP
bool ' Fast RRs' CONFIG_IRDA_FAST_RR
- bool ' Recycle RRs' CONFIG_IRDA_RECYCLE_RR
bool ' Debug information' CONFIG_IRDA_DEBUG
fi
fi
diff --git a/net/irda/Makefile b/net/irda/Makefile
index 4af71d35f..b3a4a371b 100644
--- a/net/irda/Makefile
+++ b/net/irda/Makefile
@@ -10,13 +10,20 @@
ALL_SUB_DIRS := irlan ircomm irlpt compressors
SUB_DIRS :=
MOD_SUB_DIRS :=
+OX_OBJS :=
O_TARGET := irda.o
-O_OBJS := irmod.o iriap.o iriap_event.o irlmp.o irlmp_event.o irlmp_frame.o \
+O_OBJS := iriap.o iriap_event.o irlmp.o irlmp_event.o irlmp_frame.o \
irlap.o irlap_event.o irlap_frame.o timer.o qos.o irqueue.o \
- irttp.o irda_device.o irias_object.o crc.o wrapper.o af_irda.o
+ irttp.o irda_device.o irias_object.o crc.o wrapper.o af_irda.o \
+ discovery.o
+OX_OBJS := irmod.o
+
MOD_LIST_NAME := IRDA_MODULES
+
+ifeq ($(CONFIG_IRDA),m)
M_OBJS := $(O_TARGET)
+endif
ifeq ($(CONFIG_IRDA_COMPRESSION),y)
O_OBJS += irlap_comp.o
@@ -48,15 +55,6 @@ else
endif
endif
-ifeq ($(CONFIG_IROBEX),y)
-SUB_DIRS += irobex
-O_OBJS += irobex/irobex.o
-else
- ifeq ($(CONFIG_IROBEX),m)
- MOD_SUB_DIRS += irobex
- endif
-endif
-
ifeq ($(CONFIG_IRDA_COMPRESSION),y)
SUB_DIRS += compressors
MOD_IN_SUB_DIRS += compressors
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 85c191b29..6dd118024 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1,16 +1,16 @@
/*********************************************************************
*
* Filename: af_irda.c
- * Version: 0.1
+ * Version: 0.6
* Description: IrDA sockets implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun May 31 10:12:43 1998
- * Modified at: Thu Jan 14 13:42:16 1999
+ * Modified at: Thu Apr 22 12:08:04 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
- * Sources: af_netroom.c, af_ax25.x
+ * Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc.
*
- * Copyright (c) 1997 Dag Brattli, All Rights Reserved.
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -26,201 +26,513 @@
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/net.h>
+#include <linux/irda.h>
+
+#include <asm/uaccess.h>
+
#include <net/sock.h>
-#include <asm/segment.h>
#include <net/irda/irda.h>
-#include <net/irda/irmod.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irias_object.h>
#include <net/irda/irttp.h>
+#include <net/irda/discovery.h>
-extern int irda_init(void);
+extern int irda_init(void);
extern void irda_cleanup(void);
-extern int irlap_input(struct sk_buff *, struct device *, struct packet_type *);
+extern int irlap_driver_rcv(struct sk_buff *, struct device *,
+ struct packet_type *);
-#define IRDA_MAX_HEADER (TTP_HEADER+LMP_HEADER+LAP_HEADER)
+static struct proto_ops irda_proto_ops;
+static hashbin_t *cachelog = NULL;
+static struct wait_queue *discovery_wait; /* Wait for discovery */
-#ifdef IRDA_SOCKETS
+#define IRDA_MAX_HEADER (TTP_HEADER+LMP_HEADER+LAP_HEADER)
/*
- * Function irda_getname (sock, uaddr, uaddr_len, peer)
+ * Function irda_data_indication (instance, sap, skb)
*
- *
+ * Received some data from TinyTP. Just queue it on the receive queue
*
*/
-static int irda_getname( struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb)
{
- DEBUG( 0, __FUNCTION__ "(), Not implemented!\n");
+ struct irda_sock *self;
+ struct sock *sk;
+ int err;
+
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ self = (struct irda_sock *) instance;
+ ASSERT(self != NULL, return -1;);
+
+ sk = self->sk;
+ ASSERT(sk != NULL, return -1;);
+
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err) {
+ DEBUG(1, __FUNCTION__ "(), error: no more mem!\n");
+ self->rx_flow = FLOW_STOP;
+
+ /* When we return error, TTP will need to requeue the skb */
+ return err;
+ }
return 0;
}
/*
- * Function irda_sendmsg (sock, msg, len, noblock, flags)
+ * Function irda_disconnect_indication (instance, sap, reason, skb)
*
- *
+ * Connection has been closed. Check reason to find out why
*
*/
-static int irda_sendmsg( struct socket *sock, struct msghdr *msg, int len,
- int noblock, int flags)
+static void irda_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *skb)
{
-#if 0
- struct sock *sk = (struct sock *) sock->data;
- /* struct sockaddr_irda *usax = (struct sockaddr_irda *)msg->msg_name; */
- int err;
- struct sk_buff *skb;
- unsigned char *asmptr;
- int size;
- struct tsap_cb *tsap;
-
- DEBUG( 0, __FUNCTION__ "()\n");
+ struct irda_sock *self;
+ struct sock *sk;
- if (sk->err)
- return sock_error(sk);
+ DEBUG(1, __FUNCTION__ "()\n");
- if (flags)
- return -EINVAL;
-
- if (sk->zapped)
- return -EADDRNOTAVAIL;
-
- if (sk->debug)
- printk( "IrDA: sendto: Addresses built.\n");
-
- /* Build a packet */
- if (sk->debug)
- printk( "IrDA: sendto: building packet.\n");
-
- size = len + IRDA_MAX_HEADER;
-
- if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL)
- return err;
-
- skb->sk = sk;
- skb->free = 1;
- skb->arp = 1;
+ self = (struct irda_sock *) instance;
+
+ sk = self->sk;
+ if (sk == NULL)
+ return;
+
+ sk->state = TCP_CLOSE;
+ sk->err = reason;
+ sk->shutdown |= SEND_SHUTDOWN;
+ if (!sk->dead) {
+ sk->state_change(sk);
+ sk->dead = 1;
+ }
+}
+
+/*
+ * Function irda_connect_confirm (instance, sap, qos, max_sdu_size, skb)
+ *
+ * Connections has been confirmed by the remote device
+ *
+ */
+static void irda_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size, struct sk_buff *skb)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ self = (struct irda_sock *) instance;
+
+ self->max_sdu_size_tx = max_sdu_size;
+ memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
+
+ sk = self->sk;
+ if (sk == NULL)
+ return;
+
+ /* We are now connected! */
+ sk->state = TCP_ESTABLISHED;
+ sk->state_change(sk);
+}
+
+/*
+ * Function irda_connect_indication(instance, sap, qos, max_sdu_size, userdata)
+ *
+ * Incoming connection
+ *
+ */
+static void irda_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *skb)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ self = (struct irda_sock *) instance;
- skb_reserve(skb, size - len);
+ self->max_sdu_size_tx = max_sdu_size;
+ memcpy(&self->qos_tx, qos, sizeof(struct qos_info));
+
+ sk = self->sk;
+ if (sk == NULL)
+ return;
- memcpy_fromiovec( asmptr, msg->msg_iov, len);
+ skb_queue_tail(&sk->receive_queue, skb);
+
+ sk->state_change(sk);
+}
+
+/*
+ * Function irda_connect_response (handle)
+ *
+ * Accept incoming connection
+ *
+ */
+void irda_connect_response(struct irda_sock *self)
+{
+ struct sk_buff *skb;
+
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+
+ skb = dev_alloc_skb(64);
+ if (skb == NULL) {
+ DEBUG( 0, __FUNCTION__ "() Could not allocate sk_buff!\n");
+ return;
+ }
+
+ /* Reserve space for MUX_CONTROL and LAP header */
+ skb_reserve(skb, TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER);
+
+ irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb);
+}
+
+
+/*
+ * Function irda_flow_indication (instance, sap, flow)
+ *
+ * Used by TinyTP to tell us if it can accept more data or not
+ *
+ */
+static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
+{
+ struct irda_sock *self;
+ struct sock *sk;
+
+ DEBUG(1, __FUNCTION__ "()\n");
- if (sk->debug)
- printk( "IrDA: Transmitting buffer\n");
+ self = (struct irda_sock *) instance;
+ ASSERT(self != NULL, return;);
+
+ sk = self->sk;
+ ASSERT(sk != NULL, return;);
- if (sk->state != TCP_ESTABLISHED) {
- kfree_skb( skb, FREE_WRITE);
- return -ENOTCONN;
+ switch (flow) {
+ case FLOW_STOP:
+ DEBUG( 0, __FUNCTION__ "(), IrTTP wants us to slow down\n");
+ self->tx_flow = flow;
+ break;
+ case FLOW_START:
+ self->tx_flow = flow;
+ DEBUG(0, __FUNCTION__ "(), IrTTP wants us to start again\n");
+ wake_up_interruptible(sk->sleep);
+ break;
+ default:
+ DEBUG( 0, __FUNCTION__ "(), Unknown flow command!\n");
}
+}
+
+/*
+ * Function irda_get_value_confirm (obj_id, value, priv)
+ *
+ * Got answer from remote LM-IAS
+ *
+ */
+static void irda_get_value_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
+{
+ struct irda_sock *self;
- tsap = (struct tsap_cb *) sk->protinfo.irda;
- ASSERT( tsap != NULL, return -ENODEV;);
- ASSERT( tsap->magic == TTP_TSAP_MAGIC, return -EBADR;);
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ ASSERT(priv != NULL, return;);
+ self = (struct irda_sock *) priv;
- irttp_data_request( tsap, skb);
-#endif
- return len;
+ if (!self)
+ return;
+
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ DEBUG(0, __FUNCTION__ "(), IAS query failed!\n");
+
+ self->errno = result;
+
+ /* Wake up any processes waiting for result */
+ wake_up_interruptible(&self->ias_wait);
+
+ return;
+ }
+
+ switch (value->type) {
+ case IAS_INTEGER:
+ DEBUG(4, __FUNCTION__ "() int=%d\n", value->t.integer);
+
+ if (value->t.integer != -1) {
+ self->dtsap_sel = value->t.integer;
+ } else
+ self->dtsap_sel = 0;
+ break;
+ default:
+ DEBUG(0, __FUNCTION__ "(), bad type!\n");
+ break;
+ }
+ /* Wake up any processes waiting for result */
+ wake_up_interruptible(&self->ias_wait);
}
/*
- * Function irda_recvmsg (sock, msg, size, noblock, flags, addr_len)
+ * Function irda_discovery_indication (log)
*
- *
+ * Got a discovery log from IrLMP, wake up any process waiting for answer
*
*/
-static int irda_recvmsg( struct socket *sock, struct msghdr *msg, int size,
- int noblock, int flags, int *addr_len)
+static void irda_discovery_indication(hashbin_t *log)
{
- int copied=0;
-#if 0
- struct sock *sk = (struct sock *)sock->data;
- struct sockaddr_irda *sax = (struct sockaddr_irda *)msg->msg_name;
- struct sk_buff *skb;
- int er;
+ DEBUG(1, __FUNCTION__ "()\n");
- DEBUG( 0, __FUNCTION__ "()\n");
+ cachelog = log;
- if (sk->err)
- return sock_error(sk);
+ /* Wake up process if its waiting for device to be discovered */
+ wake_up_interruptible(&discovery_wait);
+}
+
+/*
+ * Function irda_open_tsap (self)
+ *
+ * Open local Transport Service Access Point (TSAP)
+ *
+ */
+static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name)
+{
+ struct notify_t notify;
- if (addr_len != NULL)
- *addr_len = sizeof(*sax);
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ /* Initialize callbacks to be used by the IrDA stack */
+ irda_notify_init(&notify);
+ notify.connect_confirm = irda_connect_confirm;
+ notify.connect_indication = irda_connect_indication;
+ notify.disconnect_indication = irda_disconnect_indication;
+ notify.data_indication = irda_data_indication;
+ notify.flow_indication = irda_flow_indication;
+ notify.instance = self;
+ strncpy(notify.name, name, NOTIFY_MAX_NAME);
+
+ self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT,
+ &notify);
+ if (self->tsap == NULL) {
+ DEBUG( 0, __FUNCTION__ "(), Unable to allocate TSAP!\n");
+ return -1;
+ }
+ /* Remember which TSAP selector we actually got */
+ self->stsap_sel = self->tsap->stsap_sel;
- /*
- * This works for seqpacket too. The receiver has ordered the queue for
- * us! We do one quick check first though
- */
- if (sk->state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ return 0;
+}
+
+/*
+ * Function irda_find_lsap_sel (self, name)
+ *
+ * Try to lookup LSAP selector in remote LM-IAS
+ *
+ */
+static int irda_find_lsap_sel(struct irda_sock *self, char *name)
+{
+ DEBUG(1, __FUNCTION__ "()\n");
- /* Now we can treat all alike */
- if ((skb = skb_recv_datagram( sk, flags, noblock, &er)) == NULL)
- return er;
+ ASSERT(self != NULL, return -1;);
-/* if (!sk->nr->hdrincl) { */
-/* skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); */
-/* skb->h.raw = skb->data; */
-/* } */
+ /* Query remote LM-IAS */
+ iriap_getvaluebyclass_request(name, "IrDA:TinyTP:LsapSel",
+ self->saddr, self->daddr,
+ irda_get_value_confirm, self);
+ /* Wait for answer */
+ interruptible_sleep_on(&self->ias_wait);
- copied = (size < skb->len) ? size : skb->len;
- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (self->dtsap_sel)
+ return 0;
- skb_free_datagram(sk, skb);
-#endif
- return copied;
+ return -ENETUNREACH; /* May not be true */
+}
+
+/*
+ * Function irda_getname (sock, uaddr, uaddr_len, peer)
+ *
+ * Return our own, or the peer's, socket address (sockaddr_irda)
+ *
+ */
+static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ struct sockaddr_irda saddr;
+ struct sock *sk = sock->sk;
+
+ if (peer) {
+ if (sk->state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ saddr.sir_family = AF_IRDA;
+ saddr.sir_lsap_sel = sk->protinfo.irda->dtsap_sel;
+ saddr.sir_addr = sk->protinfo.irda->daddr;
+ } else {
+ saddr.sir_family = AF_IRDA;
+ saddr.sir_lsap_sel = sk->protinfo.irda->stsap_sel;
+ saddr.sir_addr = sk->protinfo.irda->saddr;
+ }
+
+ DEBUG(1, __FUNCTION__ "(), tsap_sel = %#x\n", saddr.sir_lsap_sel);
+ DEBUG(1, __FUNCTION__ "(), addr = %08x\n", saddr.sir_addr);
+
+ if (*uaddr_len > sizeof (struct sockaddr_irda))
+ *uaddr_len = sizeof (struct sockaddr_irda);
+ memcpy(uaddr, &saddr, *uaddr_len);
+
+ return 0;
}
/*
* Function irda_listen (sock, backlog)
*
- *
+ * Just move to the listen state
*
*/
static int irda_listen( struct socket *sock, int backlog)
{
-#if 0
- struct sock *sk = (struct sock *)sock->data;
+ struct sock *sk = sock->sk;
- if (sk->state != TCP_LISTEN) {
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ if (sk->type == SOCK_STREAM && sk->state != TCP_LISTEN) {
sk->max_ack_backlog = backlog;
sk->state = TCP_LISTEN;
+
return 0;
- }
-#endif
+ }
+
return -EOPNOTSUPP;
}
/*
* Function irda_bind (sock, uaddr, addr_len)
*
- * Bind to a specified TSAP
+ * Used by servers to register their well known TSAP
*
*/
-static int irda_bind( struct socket *sock, struct sockaddr *uaddr,
- int addr_len)
+static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
-#if 0
- struct sock *sk;
- struct full_sockaddr_irda *addr = (struct full_sockaddr_irda *)uaddr;
- struct device *dev;
- irda_address *user, *source;
- struct tsap_cb *tsap;
- struct notify_t notify;
+ struct sock *sk = sock->sk;
+ struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
+ struct irda_sock *self;
+ __u16 hints = 0;
+ int err;
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
- sk = (struct sock *) sock->data;
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
- if ( sk->zapped == 0)
- return -EIO;
+ if ((addr_len < sizeof(struct sockaddr_irda)) ||
+ (addr_len > sizeof(struct sockaddr_irda)))
+ return -EINVAL;
+
+ err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
+ if (err < 0)
+ return -ENOMEM;
- irda_notify_init( &notify);
- tsap = irttp_open_tsap( LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
+ /* Register with LM-IAS */
+ self->ias_obj = irias_new_object(addr->sir_name, jiffies);
+ irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel",
+ self->stsap_sel);
+ irias_insert_object(self->ias_obj);
+
+ /* Fill in some default hint bits values */
+ if (strncmp(addr->sir_name, "OBEX", 4) == 0)
+ hints = irlmp_service_to_hint(S_OBEX);
+
+ if (hints)
+ self->skey = irlmp_register_service(hints);
+
+ return 0;
+}
+
+/*
+ * Function irda_accept (sock, newsock, flags)
+ *
+ * Wait for incoming connection
+ *
+ */
+static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ struct irda_sock *self, *new;
+ struct sock *sk = sock->sk;
+ struct sock *newsk;
+ struct sk_buff *skb;
+
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
+
+ if (sock->state != SS_UNCONNECTED)
+ return -EINVAL;
+
+ if ((sk = sock->sk) == NULL)
+ return -EINVAL;
+
+ if (sk->type != SOCK_STREAM)
+ return -EOPNOTSUPP;
+
+ if (sk->state != TCP_LISTEN)
+ return -EINVAL;
+
+ /*
+ * The read queue this time is holding sockets ready to use
+ * hooked into the SABM we saved
+ */
+ do {
+ if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) {
+ if (flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+
+ interruptible_sleep_on(sk->sleep);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ }
+ } while (skb == NULL);
+
+ newsk = newsock->sk;
+ newsk->state = TCP_ESTABLISHED;
- sk->zapped = 0;
+ new = newsk->protinfo.irda;
+ ASSERT(new != NULL, return -1;);
+
+ /* Now attach up the new socket */
+ new->tsap = irttp_dup(self->tsap, new);
+ if (!new->tsap) {
+ DEBUG(0, __FUNCTION__ "(), dup failed!\n");
+ return -1;
+ }
+
+ new->stsap_sel = new->tsap->stsap_sel;
+ new->dtsap_sel = new->tsap->dtsap_sel;
+ new->saddr = irttp_get_saddr(new->tsap);
+ new->saddr = irttp_get_saddr(new->tsap);
+
+ new->max_sdu_size_tx = self->max_sdu_size_tx;
+ new->max_sdu_size_rx = self->max_sdu_size_rx;
+ memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info));
+
+ /* Clean up the original one to keep it in listen state */
+ self->tsap->dtsap_sel = self->tsap->lsap->dlsap_sel = LSAP_ANY;
+ self->tsap->lsap->lsap_state = LSAP_DISCONNECTED;
+
+ skb->sk = NULL;
+ skb->destructor = NULL;
+ kfree_skb(skb);
+ sk->ack_backlog--;
+
+ newsock->state = SS_CONNECTED;
+
+ irda_connect_response(new);
- if (sk->debug)
- printk("IrDA: socket is bound\n");
-#endif
return 0;
}
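
The irda_bind() and irda_accept() implementations above expect a server to register a service name (published in LM-IAS as an IrDA:TinyTP:LsapSel entry) and then block in accept() until a connect indication queues an skb on the listening socket. Below is a hedged userspace sketch of the matching server; it assumes linux/irda.h exports struct sockaddr_irda and an LSAP_ANY wildcard, and the "MyService" name is made up for illustration.

/* Hypothetical AF_IRDA server, mirroring what irda_bind()/irda_accept()
 * above expect from userspace.  Names and the LSAP_ANY wildcard are
 * assumptions about the exported headers, not taken from this diff. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/irda.h>

int irda_server(void)
{
        struct sockaddr_irda addr;
        int fd, client;

        fd = socket(AF_IRDA, SOCK_STREAM, 0);
        if (fd < 0)
                return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sir_family   = AF_IRDA;
        addr.sir_lsap_sel = LSAP_ANY;          /* let IrTTP pick a TSAP */
        strcpy(addr.sir_name, "MyService");    /* registered with LM-IAS by irda_bind() */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 1) < 0)
                return -1;

        client = accept(fd, NULL, NULL);       /* blocks in irda_accept() */
        return client;
}
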
@@ -230,17 +542,18 @@ static int irda_bind( struct socket *sock, struct sockaddr *uaddr,
* Connect to a IrDA device
*
*/
-static int irda_connect( struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags)
+static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
{
-#if 0
- struct sock *sk = (struct sock *)sock->data;
- struct sockaddr_irda *addr = (struct sockaddr_irda *)uaddr;
- irda_address *user, *source = NULL;
- struct device *dev;
+ struct sock *sk = sock->sk;
+ struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr;
+ struct irda_sock *self;
+ int err;
+
+ self = sk->protinfo.irda;
+
+ DEBUG(1, __FUNCTION__ "()\n");
- DEBUG( 0, __FUNCTION__ "()\n");
-
if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
return 0; /* Connect completed during a ERESTARTSYS event */
@@ -260,6 +573,36 @@ static int irda_connect( struct socket *sock, struct sockaddr *uaddr,
if (addr_len != sizeof(struct sockaddr_irda))
return -EINVAL;
+ /* Check if user supplied the required destination device address */
+ if (!addr->sir_addr)
+ return -EINVAL;
+
+ self->daddr = addr->sir_addr;
+ DEBUG(1, __FUNCTION__ "(), daddr = %08x\n", self->daddr);
+
+ /* Query remote LM-IAS */
+ err = irda_find_lsap_sel(self, addr->sir_name);
+ if (err) {
+ DEBUG(0, __FUNCTION__ "(), connect failed!\n");
+ return err;
+ }
+
+ /* Check if we have opened a local TSAP */
+ if (!self->tsap)
+ irda_open_tsap(self, LSAP_ANY, addr->sir_name);
+
+ /* Move to connecting socket, start sending Connect Requests */
+ sock->state = SS_CONNECTING;
+ sk->state = TCP_SYN_SENT;
+
+ /* Connect to remote device */
+ err = irttp_connect_request(self->tsap, self->dtsap_sel,
+ self->saddr, self->daddr, NULL,
+ self->max_sdu_size_rx, NULL);
+ if (err) {
+ DEBUG(0, __FUNCTION__ "(), connect failed!\n");
+ return err;
+ }
/* Now the loop */
if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
@@ -269,9 +612,9 @@ static int irda_connect( struct socket *sock, struct sockaddr *uaddr,
/* A Connect Ack with Choke or timeout or failed routing will go to
* closed. */
- while ( sk->state == TCP_SYN_SENT) {
- interruptible_sleep_on( sk->sleep);
- if (current->signal & ~current->blocked) {
+ while (sk->state == TCP_SYN_SENT) {
+ interruptible_sleep_on(sk->sleep);
+ if (signal_pending(current)) {
sti();
return -ERESTARTSYS;
}
@@ -280,152 +623,98 @@ static int irda_connect( struct socket *sock, struct sockaddr *uaddr,
if (sk->state != TCP_ESTABLISHED) {
sti();
sock->state = SS_UNCONNECTED;
- return sock_error( sk); /* Always set at this point */
+ return sock_error(sk); /* Always set at this point */
}
sock->state = SS_CONNECTED;
-
- sti();
-#endif
- return 0;
-}
-
-static int irda_socketpair( struct socket *sock1, struct socket *sock2)
-{
- DEBUG( 0, __FUNCTION__ "(), Not implemented\n");
-
- return -EOPNOTSUPP;
-}
-
-static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
-{
- struct sock *sk;
- struct sock *newsk;
- struct sk_buff *skb;
-
- DEBUG( 0, __FUNCTION__ "()\n");
-#if 0
- if (newsock->data)
- sk_free(newsock->data);
-
- newsock->data = NULL;
-
- sk = (struct sock *)sock->data;
-
- if (sk->type != SOCK_SEQPACKET)
- return -EOPNOTSUPP;
- if (sk->state != TCP_LISTEN)
- return -EINVAL;
-
- /*
- * The write queue this time is holding sockets ready to use
- * hooked into the SABM we saved
- */
- do {
- cli();
- if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) {
- if (flags & O_NONBLOCK) {
- sti();
- return 0;
- }
- interruptible_sleep_on(sk->sleep);
- if (current->signal & ~current->blocked) {
- sti();
- return -ERESTARTSYS;
- }
- }
- } while (skb == NULL);
-
- newsk = skb->sk;
- newsk->pair = NULL;
sti();
-
- /* Now attach up the new socket */
- skb->sk = NULL;
- kfree_skb(skb, FREE_READ);
- sk->ack_backlog--;
- newsock->data = newsk;
-#endif
+
return 0;
}
-static void def_callback1(struct sock *sk)
-{
- DEBUG( 0, __FUNCTION__ "()\n");
-
- if (!sk->dead)
- wake_up_interruptible(sk->sleep);
-}
-
-static void def_callback2(struct sock *sk, int len)
-{
- DEBUG( 0, __FUNCTION__ "()\n");
-
- if (!sk->dead)
- wake_up_interruptible(sk->sleep);
-}
-
-
/*
* Function irda_create (sock, protocol)
*
* Create IrDA socket
*
*/
-static int irda_create( struct socket *sock, int protocol)
+static int irda_create(struct socket *sock, int protocol)
{
struct sock *sk;
+ struct irda_sock *self;
- DEBUG( 0, __FUNCTION__ "()\n");
-#if 0
- if (sock->type != SOCK_SEQPACKET || protocol != 0)
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ /* Check for valid socket type */
+ switch (sock->type) {
+ case SOCK_STREAM: /* FALLTHROUGH */
+ case SOCK_SEQPACKET:
+ break;
+ default:
return -ESOCKTNOSUPPORT;
+ }
/* Allocate socket */
- if ((sk = sk_alloc( GFP_ATOMIC)) == NULL)
+ if ((sk = sk_alloc(PF_IRDA, GFP_ATOMIC, 1)) == NULL)
+ return -ENOMEM;
+
+ self = kmalloc(sizeof(struct irda_sock), GFP_ATOMIC);
+ if (self == NULL)
return -ENOMEM;
+ memset(self, 0, sizeof(struct irda_sock));
+
+ self->sk = sk;
+ sk->protinfo.irda = self;
+
+ sock_init_data(sock, sk);
+
+ sock->ops = &irda_proto_ops;
+ sk->protocol = protocol;
+
+ /* Register as a client with IrLMP */
+ self->ckey = irlmp_register_client(0, NULL, NULL);
+ self->mask = 0xffff;
+ self->rx_flow = self->tx_flow = FLOW_START;
+ self->max_sdu_size_rx = SAR_DISABLE; /* Default value */
+ self->nslots = DISCOVERY_DEFAULT_SLOTS;
+
+ /* Notify that we are using the irda module, so nobody removes it */
+ irda_mod_inc_use_count();
- skb_queue_head_init(&sk->receive_queue);
- skb_queue_head_init(&sk->write_queue);
- skb_queue_head_init(&sk->back_log);
-
- init_timer( &sk->timer);
-
- sk->socket = sock;
- sk->type = sock->type;
- sk->protocol = protocol;
- sk->allocation = GFP_KERNEL;
- sk->rcvbuf = SK_RMEM_MAX;
- sk->sndbuf = SK_WMEM_MAX;
- sk->state = TCP_CLOSE;
- sk->priority = SOPRI_NORMAL;
- sk->mtu = 2048; /* FIXME, insert the right size*/
- sk->zapped = 1;
-/* sk->window = nr_default.window; */
-
- sk->state_change = def_callback1;
- sk->data_ready = def_callback2;
- sk->write_space = def_callback1;
- sk->error_report = def_callback1;
-
- if (sock != NULL) {
- sock->data = (void *)sk;
- sk->sleep = sock->wait;
- }
-#endif
return 0;
}
/*
- * Function irda_destroy_socket (tsap)
+ * Function irda_destroy_socket (self)
*
* Destroy socket
*
*/
-void irda_destroy_socket(struct tsap_cb *tsap)
+void irda_destroy_socket(struct irda_sock *self)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+
+ /* Unregister with IrLMP */
+ irlmp_unregister_client(self->ckey);
+ irlmp_unregister_service(self->skey);
+
+ /* Unregister with LM-IAS */
+ if (self->ias_obj)
+ irias_delete_object(self->ias_obj->name);
+
+ if (self->tsap) {
+ irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ }
+
+ kfree(self);
+
+ /* Notify that we are not using the irda module anymore */
+ irda_mod_dec_use_count();
return;
}
@@ -436,208 +725,399 @@ void irda_destroy_socket(struct tsap_cb *tsap)
*
*
*/
-static int irda_release( struct socket *sock, struct socket *peer)
+static int irda_release(struct socket *sock, struct socket *peer)
{
-#if 0
- struct sock *sk = (struct sock *)sock->data;
+ struct sock *sk = sock->sk;
- DEBUG( 0, __FUNCTION__ "()\n");
-
+ DEBUG(1, __FUNCTION__ "()\n");
- if (sk == NULL) return 0;
+ if (sk == NULL)
+ return 0;
- if (sk->type == SOCK_SEQPACKET) {
+ sk->state = TCP_CLOSE;
+ sk->shutdown |= SEND_SHUTDOWN;
+ sk->state_change(sk);
+ sk->dead = 1;
-
- } else {
- sk->state = TCP_CLOSE;
- sk->shutdown |= SEND_SHUTDOWN;
- sk->state_change(sk);
- sk->dead = 1;
- irda_destroy_socket( sk->protinfo.irda);
- }
+ irda_destroy_socket(sk->protinfo.irda);
+
+ sock->sk = NULL;
+ sk->socket = NULL; /* Not used, but we should do this. */
- sock->data = NULL;
- sk->socket = NULL; /* Not used, but we should do this. **/
-#endif
return 0;
}
-
/*
- * Function irda_dup (newsock, oldsock)
+ * Function irda_sendmsg (sock, msg, len, scm)
*
- *
+ * Send message down to TinyTP
*
*/
-static int irda_dup( struct socket *newsock, struct socket *oldsock)
+static int irda_sendmsg(struct socket *sock, struct msghdr *msg, int len,
+ struct scm_cookie *scm)
{
-#if 0
- struct sock *sk = (struct sock *)oldsock->data;
+ struct sock *sk = sock->sk;
+/* struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; */
+ struct irda_sock *self;
+ struct sk_buff *skb;
+ unsigned char *asmptr;
+ int err;
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "(), len=%d\n", len);
- if (sk == NULL || newsock == NULL)
- return -EINVAL;
+ if (msg->msg_flags & ~MSG_DONTWAIT)
+ return -EINVAL;
- return irda_create( newsock, sk->protocol);
-#endif
- return 0;
-}
+ if (sk->shutdown & SEND_SHUTDOWN) {
+ send_sig(SIGPIPE, current, 0);
+ return -EPIPE;
+ }
-static int irda_shutdown( struct socket *sk, int how)
-{
- DEBUG( 0, __FUNCTION__ "()\n");
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
- /* FIXME - generate DM and RNR states */
- return -EOPNOTSUPP;
-}
+ /* Check if IrTTP wants us to slow down */
+ while (self->tx_flow == FLOW_STOP) {
+ DEBUG(2, __FUNCTION__ "(), IrTTP is busy, going to sleep!\n");
+ interruptible_sleep_on(sk->sleep);
+
+ /* Check if we are still connected */
+ if (sk->state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+ }
+ skb = sock_alloc_send_skb(sk, len + IRDA_MAX_HEADER, 0,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return -ENOBUFS;
-unsigned int irda_poll( struct file *file, struct socket *sock,
- struct poll_table_struct *wait)
-{
+ skb_reserve(skb, IRDA_MAX_HEADER);
+ DEBUG(4, __FUNCTION__ "(), appending user data\n");
+ asmptr = skb->h.raw = skb_put(skb, len);
+ memcpy_fromiovec(asmptr, msg->msg_iov, len);
+
+ /*
+ * Just send the message to TinyTP, and let it deal with possible
+ * errors. No need to duplicate all that here
+ */
+ err = irttp_data_request(self->tsap, skb);
+ if (err) {
+ DEBUG(0, __FUNCTION__ "(), err=%d\n", err);
+ return err;
+ }
+ return len;
}
/*
- * Function irda_ioctl (sock, cmd, arg)
+ * Function irda_recvmsg (sock, msg, size, flags, scm)
*
- *
+ * Try to receive message and copy it to user
*
*/
-static int irda_ioctl( struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
-#if 0
- struct sock *sk = (struct sock *) sock->data;
- int err;
-
- DEBUG( 0, __FUNCTION__ "(), Not implemented!\n");
-#endif
- return 0;
-}
-
-static int irda_setsockopt( struct socket *sock, int level, int optname,
- char *optval, int optlen)
+static int irda_recvmsg(struct socket *sock, struct msghdr *msg, int size,
+ int flags, struct scm_cookie *scm)
{
-#if 0
- struct sock *sk = (struct sock *)sock->data;
- int err, opt;
+ struct irda_sock *self;
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int copied, err;
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- if (level == SOL_SOCKET)
- return sock_setsockopt(sk, level, optname, optval, optlen);
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
- /* if (level != SOL_AX25) */
-/* return -EOPNOTSUPP; */
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return err;
- if (optval == NULL)
- return -EINVAL;
+ skb->h.raw = skb->data;
+ copied = skb->len;
+
+ if (copied > size) {
+ copied = size;
+ msg->msg_flags |= MSG_TRUNC;
+ }
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if ((err = verify_area(VERIFY_READ, optval, sizeof(int))) != 0)
- return err;
+ skb_free_datagram(sk, skb);
- opt = get_fs_long((int *)optval);
+ /*
+ * Check if we have previously stopped IrTTP and we now
+ * have more free space in our rx_queue. If so tell IrTTP
+ * to start delivering frames again before our rx_queue gets
+ * empty
+ */
+ if (self->rx_flow == FLOW_STOP) {
+ if ((atomic_read(&sk->rmem_alloc) << 2) <= sk->rcvbuf) {
+ DEBUG(2, __FUNCTION__ "(), Starting IrTTP\n");
+ self->rx_flow = FLOW_START;
+ irttp_flow_request(self->tsap, FLOW_START);
+ }
+ }
- switch (optname) {
- default:
- return -ENOPROTOOPT;
- }
-#endif
- return -ENOPROTOOPT;
+ return copied;
}
-static int irda_getsockopt(struct socket *sock, int level, int optname,
- char *optval, int *optlen)
+/*
+ * Function irda_shutdown (sk, how)
+ *
+ *
+ *
+ */
+static int irda_shutdown( struct socket *sk, int how)
{
-#if 0
- struct sock *sk = (struct sock *)sock->data;
- int val = 0;
- int err;
-
DEBUG( 0, __FUNCTION__ "()\n");
- if (level == SOL_SOCKET)
- return sock_getsockopt(sk, level, optname, optval, optlen);
-
- /* if (level != SOL_AX25) */
-/* return -EOPNOTSUPP; */
+ /* FIXME - generate DM and RNR states */
+ return -EOPNOTSUPP;
+}
- switch (optname) {
- default:
- return -ENOPROTOOPT;
- }
-
- if ((err = verify_area(VERIFY_WRITE, optlen, sizeof(int))) != 0)
- return err;
- put_user(sizeof(int), optlen);
-
- if ((err = verify_area(VERIFY_WRITE, optval, sizeof(int))) != 0)
- return err;
+/*
+ * Function irda_poll (file, sock, wait)
+ *
+ *
+ *
+ */
+unsigned int irda_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait)
+{
+ DEBUG(0, __FUNCTION__ "()\n");
- put_user(val, (int *)optval);
-#endif
- return 0;
+ return 0;
}
-static int irda_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
+/*
+ * Function irda_ioctl (sock, cmd, arg)
+ *
+ *
+ *
+ */
+static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ struct sock *sk = sock->sk;
- return -EINVAL;
+ DEBUG(0, __FUNCTION__ "(), cmd=%#x\n", cmd);
+
+ switch (cmd) {
+ case TIOCOUTQ: {
+ long amount;
+ amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
+ if (amount < 0)
+ amount = 0;
+ if (put_user(amount, (unsigned int *)arg))
+ return -EFAULT;
+ return 0;
+ }
+
+ case TIOCINQ: {
+ struct sk_buff *skb;
+ long amount = 0L;
+ /* These two are safe on a single CPU system as only user tasks fiddle here */
+ if ((skb = skb_peek(&sk->receive_queue)) != NULL)
+ amount = skb->len;
+ if (put_user(amount, (unsigned int *)arg))
+ return -EFAULT;
+ return 0;
+ }
+
+ case SIOCGSTAMP:
+ if (sk != NULL) {
+ if (sk->stamp.tv_sec == 0)
+ return -ENOENT;
+ if (copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EINVAL;
+
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ return -EINVAL;
+
+ default:
+ return dev_ioctl(cmd, (void *) arg);
+ }
+
+ /*NOTREACHED*/
+ return 0;
}
+
/*
- * Function irda_rcv (skb, dev, dev_addr, ptype)
+ * Function irda_setsockopt (sock, level, optname, optval, optlen)
*
- *
+ * Set some options for the socket
*
*/
-static int irda_rcv( struct sk_buff *skb, struct device *dev,
- irda_address *dev_addr, struct packet_type *ptype)
+static int irda_setsockopt(struct socket *sock, int level, int optname,
+ char *optval, int optlen)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ struct sock *sk = sock->sk;
+ struct irda_sock *self;
+ int opt;
+
+ DEBUG(0, __FUNCTION__ "()\n");
+
+ self = sk->protinfo.irda;
+ ASSERT(self != NULL, return -1;);
+ if (level != SOL_IRLMP)
+ return -ENOPROTOOPT;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(opt, (int *)optval))
+ return -EFAULT;
+
+ switch (optname) {
+ case IRLMP_IAS_SET:
+ DEBUG(0, __FUNCTION__ "(), sorry not impl. yet!\n");
+ return 0;
+ case IRTTP_MAX_SDU_SIZE:
+ DEBUG(0, __FUNCTION__ "(), setting max_sdu_size = %d\n", opt);
+ self->max_sdu_size_rx = opt;
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
return 0;
}
/*
- * Function irda_driver_rcv (skb, dev, ptype)
+ * Function irda_getsockopt (sock, level, optname, optval, optlen)
*
*
*
*/
-static int irda_driver_rcv( struct sk_buff *skb, struct device *dev,
- struct packet_type *ptype)
+static int irda_getsockopt(struct socket *sock, int level, int optname,
+ char *optval, int *optlen)
{
- skb->sk = NULL; /* Initially we don't know who it's for */
-
- DEBUG( 0, __FUNCTION__ "()\n");
+ struct sock *sk = sock->sk;
+ struct irda_sock *self;
+ struct irda_device_list *list;
+ __u8 optbuf[sizeof(struct irda_device_list) +
+ sizeof(struct irda_device_info)*10];
+ discovery_t *discovery;
+ int val = 0;
+ int len = 0;
+ int i = 0;
-#if 0
- if ((*skb->data & 0x0F) != 0) {
- kfree_skb(skb, FREE_READ); /* Not a KISS data frame */
- return 0;
- }
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ self = sk->protinfo.irda;
- /* skb_pull(skb, AX25_KISS_HEADER_LEN); */ /* Remove the KISS byte */
+ if (level != SOL_IRLMP)
+ return -ENOPROTOOPT;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ switch (optname) {
+ case IRLMP_ENUMDEVICES:
+ DEBUG(1, __FUNCTION__ "(), IRLMP_ENUMDEVICES\n");
+
+ /* Tell IrLMP we want to be notified */
+ irlmp_update_client(self->ckey, self->mask, NULL,
+ irda_discovery_indication);
- return irda_rcv( skb, dev, (irda_address *)dev->dev_addr, ptype);
-#endif
- return NULL;
+ /* Do some discovery */
+ irlmp_discovery_request(self->nslots);
+
+ /* Devices may be discovered already */
+ if (!cachelog) {
+ DEBUG(2, __FUNCTION__ "(), no log!\n");
+
+ /* Sleep until device(s) discovered */
+ interruptible_sleep_on(&discovery_wait);
+ if (!cachelog)
+ return -1;
+ }
+
+ list = (struct irda_device_list *) optbuf;
+ /*
+ * Now, check all discovered devices (if any), and notify
+ * client only about the services that the client is
+ * interested in
+ */
+ discovery = (discovery_t *) hashbin_get_first(cachelog);
+ while (discovery != NULL) {
+ /* Mask out the ones we don't want */
+ if (discovery->hints.word & self->mask) {
+ /* Copy discovery information */
+ list->dev[i].saddr = discovery->saddr;
+ list->dev[i].daddr = discovery->daddr;
+ list->dev[i].charset = discovery->charset;
+ list->dev[i].hints[0] = discovery->hints.byte[0];
+ list->dev[i].hints[1] = discovery->hints.byte[1];
+ strncpy(list->dev[i].info, discovery->info, 22);
+ if (++i >= 10)
+ break;
+ }
+ discovery = (discovery_t *) hashbin_get_next(cachelog);
+ }
+ cachelog = NULL;
+
+ list->len = i;
+ len = sizeof(struct irda_device_list) +
+ sizeof(struct irda_device_info) * i;
+
+ DEBUG(1, __FUNCTION__ "(), len=%d, i=%d\n", len, i);
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &optbuf, len))
+ return -EFAULT;
+ break;
+ case IRTTP_MAX_SDU_SIZE:
+ if (self->max_sdu_size_tx != SAR_DISABLE)
+ val = self->max_sdu_size_tx;
+ else
+ /* SAR is disabled, so use the IrLAP data size
+ * instead */
+ val = self->qos_tx.data_size.value - IRDA_MAX_HEADER;
+
+ DEBUG(0, __FUNCTION__ "(), getting max_sdu_size = %d\n", val);
+ len = sizeof(int);
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ return 0;
}
+static struct net_proto_family irda_family_ops =
+{
+ PF_IRDA,
+ irda_create
+};
+
static struct proto_ops irda_proto_ops = {
- AF_IRDA,
+ PF_IRDA,
- irda_create,
- irda_dup,
+ sock_no_dup,
irda_release,
irda_bind,
irda_connect,
- irda_socketpair,
+ sock_no_socketpair,
irda_accept,
irda_getname,
irda_poll,
@@ -646,32 +1126,38 @@ static struct proto_ops irda_proto_ops = {
irda_shutdown,
irda_setsockopt,
irda_getsockopt,
- irda_fcntl,
+ sock_no_fcntl,
irda_sendmsg,
irda_recvmsg
};
-#endif /* IRDA_SOCKETS */
-
-static int irda_device_event( struct notifier_block *this, unsigned long event,
- void *ptr)
+/*
+ * Function irda_device_event (this, event, ptr)
+ *
+ *    Handler for netdevice notifier events; only ARPHRD_IRDA devices
+ *    are of interest here
+ *
+ */
+static int irda_device_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
- /* struct device *dev = (struct device *) ptr; */
+ struct device *dev = (struct device *) ptr;
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(3, __FUNCTION__ "()\n");
- /* Reject non AX.25 devices */
- /* if (dev->type != ARPHRD_AX25) */
-/* return NOTIFY_DONE; */
+ /* Reject non IrDA devices */
+ if (dev->type != ARPHRD_IRDA)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
- /* ax25_dev_device_up(dev); */
+ DEBUG(3, __FUNCTION__ "(), NETDEV_UP\n");
+ /* irda_dev_device_up(dev); */
break;
case NETDEV_DOWN:
- /* ax25_kill_by_device(dev); */
-/* ax25_rt_device_down(dev); */
-/* ax25_dev_device_down(dev); */
+ DEBUG(3, __FUNCTION__ "(), NETDEV_DOWN\n");
+ /* irda_kill_by_device(dev); */
+ /* irda_rt_device_down(dev); */
+ /* irda_dev_device_down(dev); */
break;
default:
break;
@@ -684,7 +1170,7 @@ static struct packet_type irda_packet_type =
{
0, /* MUTTER ntohs(ETH_P_IRDA),*/
NULL,
- irlap_input, /* irda_driver_rcv, */
+ irlap_driver_rcv,
NULL,
NULL,
};
@@ -701,17 +1187,17 @@ static struct notifier_block irda_dev_notifier = {
* Initialize IrDA protocol layer
*
*/
-void irda_proto_init(struct net_proto *pro)
+__initfunc(void irda_proto_init(struct net_proto *pro))
{
DEBUG( 4, __FUNCTION__ "\n");
- /* sock_register( irda_proto_ops.family, &irda_proto_ops); */
+ sock_register(&irda_family_ops);
+
irda_packet_type.type = htons(ETH_P_IRDA);
dev_add_pack(&irda_packet_type);
- /* register_netdevice_notifier( &irda_dev_notifier); */
+ register_netdevice_notifier( &irda_dev_notifier);
- /* printk( KERN_INFO "IrDA Sockets for Linux (Dag Brattli)\n"); */
irda_init();
}
@@ -721,6 +1207,7 @@ void irda_proto_init(struct net_proto *pro)
* Remove IrDA protocol layer
*
*/
+#ifdef MODULE
void irda_proto_cleanup(void)
{
DEBUG( 4, __FUNCTION__ "\n");
@@ -728,11 +1215,11 @@ void irda_proto_cleanup(void)
irda_packet_type.type = htons(ETH_P_IRDA);
dev_remove_pack(&irda_packet_type);
- /* unregister_netdevice_notifier( &irda_dev_notifier); */
+ unregister_netdevice_notifier( &irda_dev_notifier);
- /* (void) sock_unregister( irda_proto_ops.family); */
+ sock_unregister(PF_IRDA);
irda_cleanup();
return;
}
-
+#endif /* MODULE */
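
The IRLMP_ENUMDEVICES option added to irda_getsockopt() above is the user-space window into the discovery log. The fragment below is a minimal sketch of how an application could call it; the SOL_IRLMP level, the struct irda_device_list / irda_device_info layout and the ten-entry limit are the ones used by the kernel code above, while the SOCK_STREAM socket type and the surrounding error handling are only assumptions for illustration.

/*
 * Minimal user-space sketch: enumerate discovered IrDA devices through
 * the IRLMP_ENUMDEVICES socket option added above.  SOCK_STREAM is an
 * assumption; the buffer layout mirrors what irda_getsockopt() fills in.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/irda.h>

int enum_irda_devices(void)
{
	unsigned char buf[sizeof(struct irda_device_list) +
			  sizeof(struct irda_device_info) * 10];
	struct irda_device_list *list = (struct irda_device_list *) buf;
	int fd, len, i;

	fd = socket(PF_IRDA, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	len = sizeof(buf);
	if (getsockopt(fd, SOL_IRLMP, IRLMP_ENUMDEVICES, buf, &len) < 0) {
		close(fd);
		return -1;
	}

	/* The kernel copied back one irda_device_info per discovered peer */
	for (i = 0; i < list->len; i++)
		printf("daddr=%08x saddr=%08x name=%s\n",
		       list->dev[i].daddr, list->dev[i].saddr,
		       list->dev[i].info);

	close(fd);
	return list->len;
}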
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
new file mode 100644
index 000000000..22def3a1e
--- /dev/null
+++ b/net/irda/discovery.c
@@ -0,0 +1,245 @@
+/*********************************************************************
+ *
+ * Filename: discovery.c
+ * Version: 0.1
+ * Description: Routines for handling discoveries at the IrLMP layer
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Tue Apr 6 15:33:50 1999
+ * Modified at: Sun Apr 11 00:41:58 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ ********************************************************************/
+
+#include <linux/socket.h>
+#include <linux/irda.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irlmp.h>
+
+#include <net/irda/discovery.h>
+
+/*
+ * Function irlmp_add_discovery (cachelog, discovery)
+ *
+ *    Add a new discovery to the cachelog, replacing any earlier entry
+ *    for the same device (daddr)
+ *
+ */
+void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *discovery)
+{
+ discovery_t *old;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ /* Check if we have discovered this device before */
+ old = hashbin_remove(cachelog, discovery->daddr, NULL);
+ if (old)
+ kfree(old);
+
+ /* Insert the new and updated version */
+ hashbin_insert(cachelog, (QUEUE *) discovery, discovery->daddr, NULL);
+}
+
+/*
+ * Function irlmp_add_discovery_log (cachelog, log)
+ *
+ *    Merge the discovery log received from IrLAP into the cachelog and
+ *    delete the now empty log
+ *
+ */
+void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
+{
+ discovery_t *discovery;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ /*
+ * If log is missing this means that IrLAP was unable to perform the
+ * discovery, so restart discovery again with just the half timeout
+ * of the normal one.
+ */
+ if (log == NULL) {
+ /* irlmp_start_discovery_timer(irlmp, 150); */
+ return;
+ }
+
+ discovery = (discovery_t *) hashbin_remove_first(log);
+ while (discovery != NULL) {
+ irlmp_add_discovery(cachelog, discovery);
+
+ discovery = (discovery_t *) hashbin_remove_first(log);
+ }
+
+ /* Delete the now empty log */
+ hashbin_delete(log, (FREE_FUNC) kfree);
+}
+
+/*
+ * Function irlmp_expire_discoveries (log, saddr, force)
+ *
+ * Go through all discoveries and expire all that have stayed too long
+ *
+ */
+void irlmp_expire_discoveries(hashbin_t *log, int saddr, int force)
+{
+ discovery_t *discovery, *curr;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ discovery = (discovery_t *) hashbin_get_first(log);
+ while (discovery != NULL) {
+ curr = discovery;
+
+ /* Be sure to be one item ahead */
+ discovery = (discovery_t *) hashbin_get_next(log);
+
+ /* Test if it's time to expire this discovery */
+ if ((curr->saddr == saddr) && (force ||
+ ((jiffies - curr->timestamp) > DISCOVERY_EXPIRE_TIMEOUT)))
+ {
+ curr = hashbin_remove(log, curr->daddr, NULL);
+ if (curr)
+ kfree(curr);
+ }
+ }
+}
+
+/*
+ * Function irlmp_dump_discoveries (log)
+ *
+ * Print out all discoveries in log
+ *
+ */
+void irlmp_dump_discoveries(hashbin_t *log)
+{
+ discovery_t *discovery;
+
+ ASSERT(log != NULL, return;);
+
+ discovery = (discovery_t *) hashbin_get_first(log);
+ while (discovery != NULL) {
+ DEBUG(0, "Discovery:\n");
+ DEBUG(0, " daddr=%08x\n", discovery->daddr);
+ DEBUG(0, " saddr=%08x\n", discovery->saddr);
+ DEBUG(0, " name=%s\n", discovery->info);
+
+ discovery = (discovery_t *) hashbin_get_next(log);
+ }
+}
+
+/*
+ * Function irlmp_find_device (name, saddr)
+ *
+ * Look through the discovery log at each of the links and try to find
+ * the device with the given name. Return daddr and saddr. If saddr is
+ * specified, then look at that particular link only (not implemented).
+ */
+__u32 irlmp_find_device(hashbin_t *cachelog, char *name, __u32 *saddr)
+{
+ discovery_t *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irlmp->lock, flags);
+
+ /* Look at all discoveries for that link */
+ d = (discovery_t *) hashbin_get_first(cachelog);
+ while (d != NULL) {
+ DEBUG(1, "Discovery:\n");
+ DEBUG(1, " daddr=%08x\n", d->daddr);
+ DEBUG(1, " name=%s\n", d->info);
+
+ if (strcmp(name, d->info) == 0) {
+ *saddr = d->saddr;
+
+ spin_unlock_irqrestore(&irlmp->lock, flags);
+ return d->daddr;
+ }
+ d = (discovery_t *) hashbin_get_next(cachelog);
+ }
+
+ spin_unlock_irqrestore(&irlmp->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Function discovery_proc_read (buf, start, offset, len, unused)
+ *
+ * Print discovery information in /proc file system
+ *
+ */
+int discovery_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused)
+{
+ discovery_t *discovery;
+ unsigned long flags;
+ hashbin_t *cachelog = irlmp_get_cachelog();
+
+ if (!irlmp)
+ return len;
+
+ len = sprintf(buf, "IrLMP: Discovery log:\n\n");
+
+ save_flags(flags);
+ cli();
+
+ discovery = (discovery_t *) hashbin_get_first(cachelog);
+ while ( discovery != NULL) {
+ len += sprintf( buf+len, " name: %s,",
+ discovery->info);
+
+ len += sprintf( buf+len, " hint: ");
+ if ( discovery->hints.byte[0] & HINT_PNP)
+ len += sprintf( buf+len, "PnP Compatible ");
+ if ( discovery->hints.byte[0] & HINT_PDA)
+ len += sprintf( buf+len, "PDA/Palmtop ");
+ if ( discovery->hints.byte[0] & HINT_COMPUTER)
+ len += sprintf( buf+len, "Computer ");
+ if ( discovery->hints.byte[0] & HINT_PRINTER)
+ len += sprintf( buf+len, "Printer ");
+ if ( discovery->hints.byte[0] & HINT_MODEM)
+ len += sprintf( buf+len, "Modem ");
+ if ( discovery->hints.byte[0] & HINT_FAX)
+ len += sprintf( buf+len, "Fax ");
+ if ( discovery->hints.byte[0] & HINT_LAN)
+ len += sprintf( buf+len, "LAN Access ");
+
+ if ( discovery->hints.byte[1] & HINT_TELEPHONY)
+ len += sprintf( buf+len, "Telephony ");
+ if ( discovery->hints.byte[1] & HINT_FILE_SERVER)
+ len += sprintf( buf+len, "File Server ");
+ if ( discovery->hints.byte[1] & HINT_COMM)
+ len += sprintf( buf+len, "IrCOMM ");
+ if ( discovery->hints.byte[1] & HINT_OBEX)
+ len += sprintf( buf+len, "IrOBEX ");
+
+ len += sprintf(buf+len, ", saddr: 0x%08x",
+ discovery->saddr);
+
+ len += sprintf(buf+len, ", daddr: 0x%08x\n",
+ discovery->daddr);
+
+ len += sprintf( buf+len, "\n");
+
+ discovery = (discovery_t *) hashbin_get_next(cachelog);
+ }
+ restore_flags(flags);
+
+ return len;
+}
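
Taken together, the helpers in this new file implement a small keyed cache: irlmp_add_discovery() keeps one entry per daddr, irlmp_expire_discoveries() ages entries per link, and irlmp_find_device() resolves a discovery name back to an address pair. The sketch below is only an illustration of how a caller inside IrLMP might string them together; irlmp_get_cachelog() is the accessor already used by discovery_proc_read() above, and the function name and zero "force" flag are hypothetical.

/*
 * Hedged sketch: typical use of the discovery helpers from within IrLMP.
 * irlmp_get_cachelog() is assumed, as in discovery_proc_read() above.
 */
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/discovery.h>

static __u32 find_peer_by_name(char *name, __u32 link_saddr, __u32 *saddr)
{
	hashbin_t *cachelog = irlmp_get_cachelog();

	/* Drop entries on this link older than DISCOVERY_EXPIRE_TIMEOUT
	 * (force == 0, so fresh entries are kept) */
	irlmp_expire_discoveries(cachelog, link_saddr, 0);

	/* Resolve the advertised discovery name to a (daddr, saddr) pair;
	 * returns 0 if the device is not (or no longer) in the log */
	return irlmp_find_device(cachelog, name, saddr);
}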
diff --git a/net/irda/ircomm/Makefile b/net/irda/ircomm/Makefile
index 1ff2e45be..7df055ea1 100644
--- a/net/irda/ircomm/Makefile
+++ b/net/irda/ircomm/Makefile
@@ -8,9 +8,9 @@
# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := ircomm_n_vtd.o
-O_OBJS := ircomm_common.o attach.o irvtd.o irvtd_driver.o
+O_OBJS := ircomm_common.o irvtd_driver.o
M_OBJS := ircomm.o ircomm_tty.o
-MI_OBJS := ircomm_common.o attach.o irvtd.o irvtd_driver.o
+MI_OBJS := ircomm_common.o irvtd_driver.o
OX_OBJS +=
@@ -18,10 +18,10 @@ OX_OBJS +=
ifeq ($(CONFIG_IRCOMM),m)
ircomm.o: $(MI_OBJS)
- $(LD) $(LD_RFLAG) -r -o $@ ircomm_common.o attach.o
+ $(LD) $(LD_RFLAG) -r -o $@ ircomm_common.o
ircomm_tty.o: $(MI_OBJS)
- $(LD) $(LD_RFLAG) -r -o $@ irvtd.o irvtd_driver.o
+ $(LD) $(LD_RFLAG) -r -o $@ irvtd_driver.o
endif
include $(TOPDIR)/Rules.make
diff --git a/net/irda/ircomm/attach.c b/net/irda/ircomm/attach.c
deleted file mode 100644
index 1e5e373ee..000000000
--- a/net/irda/ircomm/attach.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*********************************************************************
- *
- * Filename: attach.c
- * Version:
- * Description: An implementation of IrCOMM service interface.
- * Status: Experimental.
- * Author: Takahide Higuchi <thiguchi@pluto.dti.ne.jp>
- *
- * Copyright (c) 1998, Takahide Higuchi, <thiguchi@pluto.dti.ne.jp>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * I, Takahide Higuchi, provide no warranty for any of this software.
- * This material is provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-
-/*
- * ----------------------------------------------------------------------
- * IrIAS related things for IrCOMM
- * If you are to use ircomm layer, use ircomm_attach_cable to
- * setup it and register your program.
- * ----------------------------------------------------------------------
- */
-
-
-#include <linux/sched.h>
-#include <linux/tqueue.h>
-
-#include <net/irda/irlap.h>
-#include <net/irda/irttp.h>
-#include <net/irda/iriap.h>
-#include <net/irda/irias_object.h>
-
-#include <net/irda/ircomm_common.h>
-
-extern struct ircomm_cb **ircomm;
-struct ircomm_cb *discovering_instance;
-
-static void got_lsapsel(struct ircomm_cb * info);
-static void query_lsapsel(struct ircomm_cb * self);
-void ircomm_getvalue_confirm( __u16 obj_id, struct ias_value *value, void *priv );
-
-#if 0
-static char *rcsid = "$Id: attach.c,v 1.11 1998/10/22 12:02:20 dagb Exp $";
-#endif
-
-
-/*
- * handler for iriap_getvaluebyclass_request()
- *
- */
-
-void ircomm_getvalue_confirm( __u16 obj_id, struct ias_value *value, void *priv ){
-
- struct ircomm_cb *self = (struct ircomm_cb *) priv;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRCOMM_MAGIC, return;);
-
- DEBUG(0, __FUNCTION__"type(%d)\n", value->type);
-
- switch(value->type){
-
- case IAS_OCT_SEQ:
- /*
- * FIXME:we should use data which came here
- * it is used for nothing at this time
- */
-
-#if 1
- DEBUG(0, "octet sequence is:\n");
- {
- int i;
- for ( i=0;i<value->len;i++)
- printk("%02x",
- (int)(*value->t.oct_seq + i) );
- printk("\n");
- }
-#endif
- query_lsapsel(self);
- break;
-
- case IAS_INTEGER:
- /* LsapSel seems to be sent to me */
-
- if ( value->t.integer == -1){
- DEBUG( 0, "ircomm_getvalue_confirm: invalid value!\n");
- return;
- }
- if(self->state == COMM_IDLE){
- self->dlsap = value->t.integer;
- got_lsapsel(self);
- }
- break;
-
- case IAS_STRING:
- DEBUG( 0, __FUNCTION__":STRING is not implemented\n");
- DEBUG( 0, __FUNCTION__":received string:%s\n",
- value->t.string);
- query_lsapsel(self); /* experiment */
- break;
-
- case IAS_MISSING:
- DEBUG( 0, __FUNCTION__":MISSING is not implemented\n");
- break;
-
- default:
- DEBUG( 0, __FUNCTION__":unknown type!\n");
- break;
- }
-}
-
-
-static void got_lsapsel(struct ircomm_cb * self){
-
- struct notify_t notify;
-
- DEBUG(0, "ircomm:got_lsapsel: got peersap!(%d)\n", self->dlsap );
-
- /* remove tsap for server */
- irttp_close_tsap(self->tsap);
-
- /* create TSAP for initiater ... */
- irda_notify_init(&notify);
- notify.data_indication = ircomm_accept_data_indication;
- notify.connect_confirm = ircomm_accept_connect_confirm;
- notify.connect_indication = ircomm_accept_connect_indication;
- notify.flow_indication = ircomm_accept_flow_indication;
- notify.disconnect_indication = ircomm_accept_disconnect_indication;
- strncpy( notify.name, "IrCOMM cli", NOTIFY_MAX_NAME);
- notify.instance = self;
-
- self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
- &notify );
- ASSERT(self->tsap != NULL, return;);
-
-
- /*
- * invoke state machine
- * and notify that I'm ready to accept connect_request
- */
-
- ircomm_next_state(self, COMM_IDLE);
- if(self->d_handler)
- self->d_handler(self);
-}
-
-
-
-
-
-static void query_lsapsel(struct ircomm_cb * self){
-
- DEBUG(0, "ircomm:query_lsapsel..\n");
-
- /*
- * since we've got Parameters field of IAS, we are to get peersap.
- */
-
- if(!(self->servicetype & THREE_WIRE_RAW)){
- iriap_getvaluebyclass_request
- (self->daddr, "IrDA:IrCOMM", "IrDA:TinyTP:LsapSel",
- ircomm_getvalue_confirm, self );
- } else {
- DEBUG(0,"ircomm:query_lsap:"
- "THREE_WIRE_RAW is not implemented!\n");
- }
-}
-
-
-
-/*
- * ircomm_discovery_indication()
- * Remote device is discovered, try query the remote IAS to see which
- * device it is, and which services it has.
- */
-
-void ircomm_discovery_indication( DISCOVERY *discovery)
-{
-
- struct ircomm_cb *self;
-
- DEBUG( 0, "ircomm_discovery_indication\n");
-
- self = discovering_instance;
- ASSERT(self != NULL, return;);
- ASSERT(self->magic == IRCOMM_MAGIC, return;);
-
- self->daddr = discovery->daddr;
-
- DEBUG( 0, "ircomm_discovery_indication:daddr=%08x\n", self->daddr);
-
- /* query "Parameters" attribute of LM-IAS */
-
- DEBUG(0, "ircomm:querying parameters..\n");
-#if 0
- iriap_getvaluebyclass_request(self->daddr, "IrDA:IrCOMM",
-
- "Parameters",
- ircomm_getvalue_confirm,
- self);
-#else
- query_lsapsel(self);
-#endif
- return;
-}
-
-
-struct ircomm_cb * ircomm_attach_cable( __u8 servicetype,
- struct notify_t notify,
- void *handler ){
-
- int i;
- struct ircomm_cb *self = NULL;
- struct notify_t server_notify;
- struct ias_object* obj;
-
- /* FIXME: it should not be hard coded */
- __u8 oct_seq[6] = { 0,1,4,1,1,1 };
-
- ASSERT(ircomm != NULL,return NULL;);
- DEBUG(0,"ircomm_attach_cable:\n");
-
-
- /* find free handle */
-
- for(i = 0; i < IRCOMM_MAX_CONNECTION; i++){
- ASSERT(ircomm[i] != NULL,return(NULL););
- if(!ircomm[i]->in_use){
- self = ircomm[i];
- break;
- }
- }
-
- if (!self){
- DEBUG(0,"ircomm_attach_cable:no free handle!\n");
- return (NULL);
- }
-
- self->in_use = 1;
- self->servicetype = servicetype;
-
- DEBUG(0,"attach_cable:servicetype:%d\n",servicetype);
- self->d_handler = handler;
- self->notify = notify;
-
- /* register server.. */
-
- /*
- * TODO: since server TSAP is currentry hard coded,
- * we can use *only one* IrCOMM connection.
- * We have to create one more TSAP and register IAS entry dynamically
- * each time when we are to allocate new server here.
- */
- irda_notify_init(&server_notify);
- server_notify.data_indication = ircomm_accept_data_indication;
- server_notify.connect_confirm = ircomm_accept_connect_confirm;
- server_notify.connect_indication = ircomm_accept_connect_indication;
- server_notify.flow_indication = ircomm_accept_flow_indication;
- server_notify.disconnect_indication = ircomm_accept_disconnect_indication;
- server_notify.instance = self;
- strncpy( server_notify.name, "IrCOMM srv", NOTIFY_MAX_NAME);
-
- self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
- &server_notify);
- if(!self->tsap){
- DEBUG(0,"ircomm:Sorry, failed to allocate server_tsap\n");
- return NULL;
- }
-
- /*
- * Register with LM-IAS
- */
- obj = irias_new_object( "IrDA:IrCOMM", IAS_IRCOMM_ID);
- irias_add_integer_attrib( obj, "IrDA:TinyTP:LsapSel",
- self->tsap->stsap_sel );
-
- /* FIXME: it should not be hard coded */
-
- irias_add_octseq_attrib( obj, "Parameters",
- &oct_seq[0], 6);
- irias_insert_object( obj);
-
-/* obj = irias_new_object( "IrDA:IrCOMM", IAS_IRCOMM_ID); */
-/* irias_add_octseq_attrib( obj, "Parameters", len, &octseq); */
-/* irias_insert_object( obj); */
-
-
-
- /* and start discovering .. */
- discovering_instance = self;
-
- switch(servicetype){
- case NINE_WIRE:
- DEBUG(0,"ircomm_attach_cable:discovering..\n");
- irlmp_register_layer(S_COMM , CLIENT|SERVER, TRUE,
- ircomm_discovery_indication);
- break;
-
-/* case CENTRONICS: */
-/* case THREE_WIRE: */
-/* case THREE_WIRE_RAW: */
-
- default:
- DEBUG(0,"ircomm_attach_cable:requested servicetype is not "
- "implemented!\n");
- return NULL;
- }
-
- ircomm_next_state(self, COMM_IDLE);
- return (self);
-}
-
-
-
-
-int ircomm_detach_cable(struct ircomm_cb *self){
-
- ASSERT( self != NULL, return -EIO;);
- ASSERT( self->magic == IRCOMM_MAGIC, return -EIO;);
-
-
- DEBUG(0,"ircomm_detach_cable:\n");
-
- /* shutdown ircomm layer */
- if(self->state != COMM_IDLE ){
- DEBUG(0,"ircomm:detach_cable:not IDLE\n");
- if(self->state != COMM_WAITI)
- ircomm_disconnect_request(self, NULL);
- }
-
-
- switch(self->servicetype){
-/* case CENTRONICS: */
- case NINE_WIRE:
-/* case THREE_WIRE: */
- irlmp_unregister_layer( S_COMM, CLIENT|SERVER );
- break;
-
-/* case THREE_WIRE_RAW: */
-/* irlmp_unregister( S_COMM ) */
-/* irlmp_unregister( S_PRINTER) */
-/* break; */
-
- default:
- DEBUG(0,"ircomm_detach_cable:requested servicetype is not "
- "implemented!\n");
- return -ENODEV;
- }
-
- /* remove tsaps */
- if(self->tsap)
- irttp_close_tsap(self->tsap);
-
- self->tsap = NULL;
- self->in_use = 0;
- return 0;
-}
diff --git a/net/irda/ircomm/ircomm_common.c b/net/irda/ircomm/ircomm_common.c
index cfda567ce..bc8758e1e 100644
--- a/net/irda/ircomm/ircomm_common.c
+++ b/net/irda/ircomm/ircomm_common.c
@@ -33,25 +33,29 @@
#include <linux/proc_fs.h>
#include <linux/init.h>
-#include <net/irda/irmod.h>
+#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
#include <net/irda/iriap.h>
#include <net/irda/irttp.h>
+#include <net/irda/irias_object.h>
#include <net/irda/ircomm_common.h>
-#if 0
-static char *rcsid = "$Id: ircomm_common.c,v 1.13 1998/10/13 12:59:05 takahide Exp $";
-#endif
-static char *version = "IrCOMM_common, $Revision: 1.13 $ $Date: 1998/10/13 12:59:05 $ (Takahide Higuchi)";
-
+static char *revision_date = "Sun Apr 18 00:40:19 1999";
-
-static void ircomm_state_discovery( struct ircomm_cb *self,
- IRCOMM_EVENT event, struct sk_buff *skb );
static void ircomm_state_idle( struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb );
+
+static void ircomm_state_discoverywait( struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb );
+
+static void ircomm_state_queryparamwait( struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb );
+
+static void ircomm_state_querylsapwait( struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb );
+
static void ircomm_state_waiti( struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb );
static void ircomm_state_waitr( struct ircomm_cb *self, IRCOMM_EVENT event,
@@ -60,15 +64,43 @@ static void ircomm_state_conn( struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb );
static void ircomm_do_event( struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb);
-void ircomm_next_state( struct ircomm_cb *self, IRCOMM_STATE state);
+static void ircomm_next_state( struct ircomm_cb *self, IRCOMM_STATE state);
+
+static void ircomm_discovery_indication(discovery_t *discovery);
+static void ircomm_tx_controlchannel(struct ircomm_cb *self );
+static int ircomm_proc_read(char *buf, char **start, off_t offset,
+ int len, int unused);
+
+static void start_discovering(struct ircomm_cb *self);
+static void query_lsapsel(struct ircomm_cb * self);
+static void query_parameters(struct ircomm_cb *self);
+static void queryias_done(struct ircomm_cb *self);
+static void ircomm_getvalue_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv);
+
+
+struct ircomm_cb *discovering_instance;
+
+/*
+ * debug parameter ircomm_cs:
+ * 0 = client/server, 1 = client only, 2 = server only
+ * usage for example:
+ * insmod ircomm ircomm_cs=1
+ * LILO boot : Linux ircomm_cs=2 etc.
+ */
+
+static int ircomm_cs = 0;
+MODULE_PARM(ircomm_cs, "i");
+
-int ircomm_check_handle(int handle);
-static void ircomm_parse_control(struct ircomm_cb *self, struct sk_buff *skb,
- int type);
static char *ircommstate[] = {
- "DISCOVERY",
"IDLE",
+
+ "DISCOVERY_WAIT",
+ "QUERYPARAM_WAIT",
+ "QUERYLSAP_WAIT",
+
"WAITI",
"WAITR",
"CONN",
@@ -107,38 +139,38 @@ static char *ircommevent[] = {
"IRCOMM_DATA_REQUEST",
"LMP_DATA_INDICATION",
"IRCOMM_CONTROL_REQUEST",
-};
-int ircomm_proc_read(char *buf, char **start, off_t offset,
- int len, int unused);
+ "DISCOVERY_INDICATION",
+ "GOT_PARAMETERS",
+ "GOT_LSAPSEL",
+ "QUERYIAS_ERROR",
+};
#ifdef CONFIG_PROC_FS
-extern struct proc_dir_entry proc_irda;
-struct proc_dir_entry proc_ircomm = {
- 0, 6, "ircomm",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL,
- &ircomm_proc_read,
-};
+extern struct proc_dir_entry *proc_irda;
#endif
static void (*state[])( struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb) =
{
- ircomm_state_discovery,
ircomm_state_idle,
+
+ ircomm_state_discoverywait,
+ ircomm_state_queryparamwait,
+ ircomm_state_querylsapwait,
+
ircomm_state_waiti,
ircomm_state_waitr,
ircomm_state_conn,
};
-
__initfunc(int ircomm_init(void))
{
int i;
- printk( KERN_INFO "%s\n", version);
- DEBUG( 4, "ircomm_common:init_module\n");
+ printk( "Linux-IrDA: IrCOMM protocol ( revision:%s ) \n",
+ revision_date);
+ DEBUG( 4, __FUNCTION__"()\n");
/* allocate master array */
@@ -146,7 +178,7 @@ __initfunc(int ircomm_init(void))
IRCOMM_MAX_CONNECTION,
GFP_KERNEL);
if ( ircomm == NULL) {
- printk( KERN_WARNING "IrCOMM: Can't allocate ircomm array!\n");
+ printk( KERN_ERR __FUNCTION__"(): kmalloc failed!\n");
return -ENOMEM;
}
@@ -158,7 +190,7 @@ __initfunc(int ircomm_init(void))
ircomm[i] = kmalloc( sizeof(struct ircomm_cb), GFP_KERNEL );
if(!ircomm[i]){
- printk(KERN_ERR "ircomm:kmalloc failed!\n");
+ printk( KERN_ERR __FUNCTION__"(): kmalloc failed!\n");
return -ENOMEM;
}
@@ -191,17 +223,20 @@ __initfunc(int ircomm_init(void))
*/
#ifdef CONFIG_PROC_FS
- proc_register( &proc_irda, &proc_ircomm);
+ create_proc_entry("ircomm", 0, proc_irda)->get_info = ircomm_proc_read;
#endif /* CONFIG_PROC_FS */
+
+ discovering_instance = NULL;
return 0;
}
+#ifdef MODULE
void ircomm_cleanup(void)
{
int i;
- DEBUG( 4, "ircomm_common:cleanup_module\n");
+ DEBUG( 4, "ircomm:cleanup_module\n");
/*
* free some resources
*/
@@ -226,9 +261,10 @@ void ircomm_cleanup(void)
}
#ifdef CONFIG_PROC_FS
- proc_unregister( &proc_irda, proc_ircomm.low_ino);
-#endif
+ remove_proc_entry("ircomm", proc_irda);
+#endif /* CONFIG_PROC_FS */
}
+#endif /* MODULE */
/*
* ----------------------------------------------------------------------
@@ -236,21 +272,27 @@ void ircomm_cleanup(void)
* ----------------------------------------------------------------------
*/
-void ircomm_accept_data_indication(void *instance, void *sap, struct sk_buff *skb){
+static int ircomm_accept_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
struct ircomm_cb *self = (struct ircomm_cb *)instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRCOMM_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT( self != NULL, return -1;);
+ ASSERT( self->magic == IRCOMM_MAGIC, return -1;);
+ ASSERT( skb != NULL, return -1;);
- DEBUG(4,"ircomm_accept_data_indication:\n");
+ DEBUG(4,__FUNCTION__"():\n");
ircomm_do_event( self, TTP_DATA_INDICATION, skb);
+ self->rx_packets++;
+
+ return 0;
}
-void ircomm_accept_connect_confirm(void *instance, void *sap,
+static void ircomm_accept_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
- int maxsdusize, struct sk_buff *skb){
+ __u32 maxsdusize, struct sk_buff *skb)
+{
struct ircomm_cb *self = (struct ircomm_cb *)instance;
@@ -259,7 +301,7 @@ void ircomm_accept_connect_confirm(void *instance, void *sap,
ASSERT( skb != NULL, return;);
ASSERT( qos != NULL, return;);
- DEBUG(0,"ircomm_accept_connect_confirm:\n");
+ DEBUG(0,__FUNCTION__"(): got connected!\n");
if(maxsdusize == SAR_DISABLE)
self->max_txbuff_size = qos->data_size.value;
@@ -274,11 +316,11 @@ void ircomm_accept_connect_confirm(void *instance, void *sap,
ircomm_do_event( self, TTP_CONNECT_CONFIRM, skb);
}
-void ircomm_accept_connect_indication(void *instance, void *sap,
+static void ircomm_accept_connect_indication(void *instance, void *sap,
struct qos_info *qos,
- int maxsdusize,
- struct sk_buff *skb ){
-
+ __u32 maxsdusize,
+ struct sk_buff *skb )
+{
struct ircomm_cb *self = (struct ircomm_cb *)instance;
ASSERT( self != NULL, return;);
@@ -286,7 +328,7 @@ void ircomm_accept_connect_indication(void *instance, void *sap,
ASSERT( skb != NULL, return;);
ASSERT( qos != NULL, return;);
- DEBUG(0,"ircomm_accept_connect_indication:\n");
+ DEBUG(0,__FUNCTION__"()\n");
if(maxsdusize == SAR_DISABLE)
self->max_txbuff_size = qos->data_size.value;
@@ -295,22 +337,31 @@ void ircomm_accept_connect_indication(void *instance, void *sap,
self->qos = qos;
ircomm_do_event( self, TTP_CONNECT_INDICATION, skb);
+
+ /* stop connecting */
+ wake_up_interruptible( &self->discovery_wait);
+ wake_up_interruptible( &self->ias_wait);
+
}
-void ircomm_accept_disconnect_indication(void *instance, void *sap, LM_REASON reason,
- struct sk_buff *skb){
+static void ircomm_accept_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *skb)
+{
struct ircomm_cb *self = (struct ircomm_cb *)instance;
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRCOMM_MAGIC, return;);
- ASSERT( skb != NULL, return;);
- DEBUG(0,"ircomm_accept_disconnect_indication:\n");
+ DEBUG(0,__FUNCTION__"():\n");
ircomm_do_event( self, TTP_DISCONNECT_INDICATION, skb);
+
}
-void ircomm_accept_flow_indication( void *instance, void *sap, LOCAL_FLOW cmd){
-
+static void ircomm_accept_flow_indication( void *instance, void *sap,
+ LOCAL_FLOW cmd)
+{
+ IRCOMM_CMD command;
struct ircomm_cb *self = (struct ircomm_cb *)instance;
ASSERT( self != NULL, return;);
@@ -318,73 +369,204 @@ void ircomm_accept_flow_indication( void *instance, void *sap, LOCAL_FLOW cmd){
switch(cmd){
case FLOW_START:
- DEBUG(0,"ircomm_accept_flow_indication:START\n");
-
- self->pi = TX_READY;
+ DEBUG(4,__FUNCTION__"():START\n");
+ command = TX_READY;
self->ttp_stop = 0;
if(self->notify.flow_indication)
self->notify.flow_indication( self->notify.instance,
- self, cmd);
- ircomm_control_request(self);
+ self, command);
break;
case FLOW_STOP:
- DEBUG(0,"ircomm_accept_flow_indication:STOP\n");
- self->pi = TX_BUSY;
+ DEBUG(4,__FUNCTION__":STOP\n");
+ command = TX_BUSY;
self->ttp_stop = 1;
if(self->notify.flow_indication)
self->notify.flow_indication( self->notify.instance,
- self, cmd);
+ self, command);
break;
default:
- DEBUG(0,"ircomm_accept_flow_indication:unknown status!\n");
+ DEBUG(0,__FUNCTION__"();unknown status!\n");
}
}
+
+/*
+ * ircomm_discovery_indication()
+ * Remote device is discovered, try query the remote IAS to see which
+ * device it is, and which services it has.
+ */
+
+static void ircomm_discovery_indication(discovery_t *discovery)
+{
+ struct ircomm_cb *self;
+
+ self = discovering_instance;
+ if(self == NULL)
+ return;
+ ASSERT(self->magic == IRCOMM_MAGIC, return;);
+
+ self->daddr = discovery->daddr;
+ self->saddr = discovery->saddr;
+
+ DEBUG( 0, __FUNCTION__"():daddr=%08x\n", self->daddr);
+
+ ircomm_do_event(self, DISCOVERY_INDICATION, NULL);
+ return;
+}
+
+/*
+ * ircomm_getvalue_confirm()
+ * handler for iriap_getvaluebyclass_request()
+ */
+static void ircomm_getvalue_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
+{
+ struct ircomm_cb *self = (struct ircomm_cb *) priv;
+ struct sk_buff *skb= NULL;
+ __u8 *frame;
+ __u8 servicetype = 0 ;
+ ASSERT( self != NULL, return;);
+ ASSERT( self->magic == IRCOMM_MAGIC, return;);
+
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ DEBUG( 0, __FUNCTION__ "(), got NULL value!\n");
+ ircomm_do_event(self, QUERYIAS_ERROR, NULL);
+ return;
+ }
+
+ DEBUG(4, __FUNCTION__"():type(%d)\n", value->type);
+
+ self->ias_type = value->type;
+ switch(value->type){
+ case IAS_OCT_SEQ:
+
+ DEBUG(4, __FUNCTION__"():got octet sequence:\n");
+#if 0
+ {
+ int i;
+ for ( i=0;i<value->len;i++)
+ printk("%02x",
+ (__u8)(*(value->t.oct_seq + i)));
+ printk("\n");
+ }
+#endif
+ skb = dev_alloc_skb((value->len) + 2);
+ ASSERT(skb != NULL, ircomm_do_event(self, QUERYIAS_ERROR, NULL);return;);
+ frame = skb_put(skb,2);
+ /* MSB first */
+ frame[0] = ( value->len >> 8 ) & 0xff;
+ frame[1] = value->len & 0xff;
+
+ frame = skb_put(skb,value->len);
+ memcpy(frame, value->t.oct_seq, value->len);
+ ircomm_parse_tuples(self, skb, IAS_PARAM);
+ kfree_skb(skb);
+
+ /*
+ * check if servicetype we want is available
+ */
+
+ DEBUG(0,__FUNCTION__"():peer capability is:\n");
+ DEBUG(0,"3wire raw: %s\n",
+ ((self->peer_servicetype & THREE_WIRE_RAW) ? "yes":"no"));
+ DEBUG(0,"3wire : %s\n",
+ ((self->peer_servicetype & THREE_WIRE) ? "yes":"no"));
+ DEBUG(0,"9wire : %s\n",
+ ((self->peer_servicetype & NINE_WIRE) ? "yes":"no"));
+ DEBUG(0,"IEEE1284 : %s\n",
+ ((self->peer_servicetype & CENTRONICS) ? "yes":"no"));
+
+ self->servicetype &= self->peer_servicetype;
+ if(!(self->servicetype)){
+ DEBUG(0,__FUNCTION__"(): servicetype mismatch!\n");
+ ircomm_do_event(self, QUERYIAS_ERROR, NULL);
+ break;
+ }
+
+ /*
+ * then choose better one
+ */
+ if(self->servicetype & THREE_WIRE_RAW)
+ servicetype = THREE_WIRE_RAW;
+ if(self->servicetype & THREE_WIRE)
+ servicetype = THREE_WIRE;
+ if(self->servicetype & NINE_WIRE)
+ servicetype = NINE_WIRE;
+ if(self->servicetype & CENTRONICS)
+ servicetype = CENTRONICS;
+
+ self->servicetype = servicetype;
+
+ /* enter next state */
+ ircomm_do_event(self, GOT_PARAMETERS, NULL);
+ break;
+
+ case IAS_INTEGER:
+ /* LsapSel seems to be sent to me */
+ DEBUG(0, __FUNCTION__"():got lsapsel = %d\n", value->t.integer);
+
+ if ( value->t.integer == -1){
+ DEBUG( 0, __FUNCTION__"():invalid value!\n");
+ ircomm_do_event(self, QUERYIAS_ERROR, NULL);
+ return;
+ }
+ self->dlsap = value->t.integer;
+ ircomm_do_event(self, GOT_LSAPSEL, NULL);
+ break;
+
+ case IAS_MISSING:
+ DEBUG( 0, __FUNCTION__":got IAS_MISSING\n");
+ ircomm_do_event(self, QUERYIAS_ERROR, NULL);
+ break;
+
+ default:
+ DEBUG( 0, __FUNCTION__":got unknown (strange?)type!\n");
+ ircomm_do_event(self, QUERYIAS_ERROR, NULL);
+ break;
+ }
+}
+
+
+
/*
* ----------------------------------------------------------------------
- * Implementation of actions,descrived in section 7.4 of the reference.
+ * Impl. of actions (described in section 7.4 of the reference)
* ----------------------------------------------------------------------
*/
-
static void issue_connect_request(struct ircomm_cb *self,
- struct sk_buff *userdata ){
-
+ struct sk_buff *userdata )
+{
/* TODO: we have to send/build userdata field which contains
InitialControlParameters */
- /* but userdata field is not implemeted in irttp.c.. */
switch(self->servicetype){
case THREE_WIRE_RAW:
- /* not implemented yet! Do nothing */
- DEBUG(0, "ircomm:issue_connect_request:"
- "not implemented servicetype!");
+ DEBUG(0, __FUNCTION__"():THREE_WIRE_RAW is not implemented\n");
break;
case DEFAULT:
- irttp_connect_request(self->tsap, self->dlsap, self->daddr,
- NULL, self->maxsdusize, NULL);
- break;
-
case THREE_WIRE:
case NINE_WIRE:
case CENTRONICS:
- irttp_connect_request(self->tsap, self->dlsap, self->daddr,
- NULL, self->maxsdusize, NULL);
+ irttp_connect_request(self->tsap, self->dlsap,
+ self->saddr, self->daddr,
+ NULL, self->maxsdusize, userdata);
break;
default:
- DEBUG(0,"ircomm:issue_connect_request:Illegal servicetype %d\n"
- ,self->servicetype);
+ printk(KERN_ERR __FUNCTION__"():Illegal servicetype %d\n"
+ ,self->servicetype);
}
}
-
-static void disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb){
+static void disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb)
+{
/*
* Not implemented parameter"Reason".That is optional.
@@ -394,12 +576,13 @@ static void disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb){
if(self->notify.disconnect_indication)
self->notify.disconnect_indication( self->notify.instance,
self,
- self->reason,skb);
+ self->reason, skb);
}
static void connect_indication(struct ircomm_cb *self, struct qos_info *qos,
- struct sk_buff *skb){
+ struct sk_buff *skb)
+{
/* If controlparameters don't exist, we use the servicetype"DEFAULT".*/
/* if( !ircomm_parse_controlchannel( self, data)) */
@@ -412,13 +595,16 @@ static void connect_indication(struct ircomm_cb *self, struct qos_info *qos,
#if 0
/* it's for THREE_WIRE_RAW.*/
-static void connect_indication_three_wire_raw(void){
+static void connect_indication_three_wire_raw(void)
+{
DEBUG(0,"ircomm:connect_indication_threewire():not implemented!");
}
#endif
-static void connect_confirmation(struct ircomm_cb *self, struct sk_buff *skb){
+static void connect_confirmation(struct ircomm_cb *self, struct sk_buff *skb)
+{
+ DEBUG(4 ,__FUNCTION__"()\n");
/* give a connect_confirm to the client */
if( self->notify.connect_confirm )
@@ -427,13 +613,13 @@ static void connect_confirmation(struct ircomm_cb *self, struct sk_buff *skb){
}
static void issue_connect_response(struct ircomm_cb *self,
- struct sk_buff *skb ){
+ struct sk_buff *skb)
+{
- DEBUG(0,"ircomm:issue_connect_response:\n");
+ DEBUG(0,__FUNCTION__"()\n");
if( self->servicetype == THREE_WIRE_RAW){
- DEBUG(0,"ircomm:issue_connect_response():3WIRE-RAW is not "
- "implemented yet !\n");
+ DEBUG(0,__FUNCTION__"():THREE_WIRE_RAW is not implemented yet\n");
/* irlmp_connect_rsp(); */
} else {
irttp_connect_response(self->tsap, self->maxsdusize, skb);
@@ -441,64 +627,91 @@ static void issue_connect_response(struct ircomm_cb *self,
}
static void issue_disconnect_request(struct ircomm_cb *self,
- struct sk_buff *userdata ){
+ struct sk_buff *userdata)
+{
if(self->servicetype == THREE_WIRE_RAW){
- DEBUG(0,"ircomm:issue_disconnect_request():3wireraw is not implemented!");
+ DEBUG(0,__FUNCTION__"():3wireraw is not implemented\n");
}
else
- irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
+ irttp_disconnect_request(self->tsap, userdata,
+ self->disconnect_priority);
}
static void issue_data_request(struct ircomm_cb *self,
- struct sk_buff *userdata ){
+ struct sk_buff *userdata )
+{
int err;
if(self->servicetype == THREE_WIRE_RAW){
/* irlmp_data_request(self->lmhandle,userdata); */
- DEBUG(0,"ircomm:issue_data_request():not implemented!");
+ DEBUG(0,__FUNCTION__"():not implemented!");
return;
}
- DEBUG(4,"ircomm:issue_data_request():sending frame\n");
+ DEBUG(4,__FUNCTION__"():sending frame\n");
err = irttp_data_request(self->tsap , userdata );
- if(err)
- DEBUG(0,"ircomm:ttp_data_request failed\n");
- if(userdata && err)
- dev_kfree_skb( userdata);
-
+ if(err){
+ printk(KERN_ERR __FUNCTION__":ttp_data_request failed\n");
+ if(userdata)
+ dev_kfree_skb( userdata);
+ }
+ self->tx_packets++;
}
static void issue_control_request(struct ircomm_cb *self,
- struct sk_buff *userdata ){
- if(self->servicetype == THREE_WIRE_RAW){
- DEBUG(0,"THREE_WIRE_RAW is not implemented\n");
+ struct sk_buff *userdata )
+{
+ int err;
+
+ DEBUG(4,__FUNCTION__"()\n");
+ if(self->servicetype == THREE_WIRE_RAW)
+ {
+ DEBUG(0,__FUNCTION__"():THREE_WIRE_RAW is not implemented\n");
- }else {
- irttp_data_request(self->tsap,userdata);
}
-}
+ else
+ {
+ err = irttp_data_request(self->tsap,userdata);
+ if(err)
+ {
+ printk( __FUNCTION__"():ttp_data_request failed\n");
+ if(userdata)
+ dev_kfree_skb( userdata);
+ }
+ else
+ self->tx_controls++;
+ self->pending_control_tuples = 0;
+ }
+}
-static void process_data(struct ircomm_cb *self, struct sk_buff *skb ){
+static void process_data(struct ircomm_cb *self, struct sk_buff *skb )
+{
- DEBUG(4,"ircomm:process_data:skb_len is(%d),clen_is(%d)\n",
+ DEBUG(4,__FUNCTION__":skb->len=%d, ircomm header = 1, clen=%d\n",
(int)skb->len ,(int)skb->data[0]);
- /*
- * we always have to parse control channel
- * (see page17 of IrCOMM standard)
+ /* we have to parse the control channel when receiving (see
+ * page 17 of the IrCOMM standard), but it is not parsed here since
+ * the upper layer may have its own receive buffer.
+ *
+ * hence the upper layer has to parse it when it consumes a packet.
+ * -- TH
*/
- ircomm_parse_control(self, skb, CONTROL_CHANNEL);
+ /* ircomm_parse_control(self, skb, CONTROL_CHANNEL); */
if(self->notify.data_indication && skb->len)
self->notify.data_indication(self->notify.instance, self,
skb);
}
-void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb){
+int ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb)
+{
/* Not implemented yet:THREE_WIRE_RAW service uses this function. */
DEBUG(0,"ircomm_data_indication:not implemented yet!\n");
+
+ return 0;
}
@@ -510,45 +723,38 @@ void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb){
*/
static void ircomm_do_event( struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb) {
+ struct sk_buff *skb)
+{
- DEBUG( 4, "ircomm_do_event: STATE = %s, EVENT = %s\n",
+ DEBUG( 4, __FUNCTION__": STATE = %s, EVENT = %s\n",
ircommstate[self->state], ircommevent[event]);
(*state[ self->state ]) ( self, event, skb);
}
-void ircomm_next_state( struct ircomm_cb *self, IRCOMM_STATE state) {
+static void ircomm_next_state( struct ircomm_cb *self, IRCOMM_STATE state)
+{
self->state = state;
- DEBUG( 0, "ircomm_next_state: NEXT STATE = %d(%s), sv(%d)\n",
+ DEBUG( 0, __FUNCTION__": NEXT STATE=%d(%s), servicetype=(%d)\n",
(int)state, ircommstate[self->state],self->servicetype);
}
-/*
- * we currently need dummy (discovering) state for debugging,
- * which state is not defined in the reference.
- */
-
-static void ircomm_state_discovery( struct ircomm_cb *self,
- IRCOMM_EVENT event, struct sk_buff *skb ){
- DEBUG(0,"ircomm_state_discovery: "
- "why call me? \n");
- if(skb)
- dev_kfree_skb( skb);
-}
-
/*
* ircomm_state_idle
*/
static void ircomm_state_idle( struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb ){
+ struct sk_buff *skb )
+{
switch(event){
case IRCOMM_CONNECT_REQUEST:
- ircomm_next_state(self, COMM_WAITI);
- issue_connect_request( self, skb );
+ /* ircomm_next_state(self, COMM_WAITI); */
+ /* issue_connect_request( self, skb ); */
+
+ ircomm_next_state(self, COMM_DISCOVERY_WAIT);
+ start_discovering(self);
break;
case TTP_CONNECT_INDICATION:
@@ -559,15 +765,128 @@ static void ircomm_state_idle( struct ircomm_cb *self, IRCOMM_EVENT event,
case LMP_CONNECT_INDICATION:
- /* I think this is already done in irlpt_event.c */
-
- DEBUG(0,"ircomm_state_idle():LMP_CONNECT_IND is notimplemented!");
+ DEBUG(0,__FUNCTION__"():LMP_CONNECT_IND is notimplemented!");
/* connect_indication_three_wire_raw(); */
/* ircomm_next_state(self, COMM_WAITR); */
break;
default:
- DEBUG(0,"ircomm_state_idle():unknown event =%d(%s)\n",
+ DEBUG(0,__FUNCTION__"():unknown event =%d(%s)\n",
+ event, ircommevent[event]);
+ }
+}
+
+/*
+ * ircomm_state_discoverywait
+ */
+static void ircomm_state_discoverywait(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb )
+{
+ switch(event){
+
+ case TTP_CONNECT_INDICATION:
+
+ ircomm_next_state(self, COMM_WAITR);
+ queryias_done(self);
+ connect_indication( self, self->qos, skb);
+ break;
+
+ case DISCOVERY_INDICATION:
+ ircomm_next_state(self, COMM_QUERYPARAM_WAIT);
+ query_parameters(self);
+ break;
+
+ case IRCOMM_DISCONNECT_REQUEST:
+ ircomm_next_state(self, COMM_IDLE);
+ queryias_done(self);
+ break;
+
+ case QUERYIAS_ERROR:
+ ircomm_next_state(self, COMM_IDLE);
+ disconnect_indication(self, NULL);
+ queryias_done(self);
+ break;
+
+ default:
+ DEBUG(0,__FUNCTION__"():unknown event =%d(%s)\n",
+ event, ircommevent[event]);
+ }
+}
+
+/*
+ * ircomm_state_queryparamwait
+ */
+
+static void ircomm_state_queryparamwait(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb )
+{
+ switch(event){
+
+ case TTP_CONNECT_INDICATION:
+
+ ircomm_next_state(self, COMM_WAITR);
+ connect_indication( self, self->qos, skb);
+ break;
+
+ case GOT_PARAMETERS:
+
+ ircomm_next_state(self, COMM_QUERYLSAP_WAIT);
+ query_lsapsel( self );
+ break;
+
+ case IRCOMM_DISCONNECT_REQUEST:
+ ircomm_next_state(self, COMM_IDLE);
+ queryias_done(self);
+ break;
+
+ case QUERYIAS_ERROR:
+ ircomm_next_state(self, COMM_IDLE);
+ disconnect_indication(self, NULL);
+ queryias_done(self);
+ break;
+
+ default:
+ DEBUG(0,__FUNCTION__"():unknown event =%d(%s)\n",
+ event, ircommevent[event]);
+ }
+}
+
+/*
+ * ircomm_state_querylsapwait
+ */
+
+static void ircomm_state_querylsapwait(struct ircomm_cb *self, IRCOMM_EVENT event,
+ struct sk_buff *skb )
+{
+ switch(event){
+
+ case TTP_CONNECT_INDICATION:
+
+ ircomm_next_state(self, COMM_WAITR);
+ connect_indication( self, self->qos, skb);
+ break;
+
+ case GOT_LSAPSEL:
+
+ ircomm_next_state(self, COMM_WAITI);
+ queryias_done(self);
+ issue_connect_request( self, skb );
+ break;
+
+ case IRCOMM_DISCONNECT_REQUEST:
+ ircomm_next_state(self, COMM_IDLE);
+ queryias_done(self);
+ break;
+
+ case QUERYIAS_ERROR:
+ ircomm_next_state(self, COMM_IDLE);
+ disconnect_indication(self, NULL);
+ queryias_done(self);
+ break;
+
+
+ default:
+ DEBUG(0,__FUNCTION__"():unknown event =%d(%s)\n",
event, ircommevent[event]);
}
}
@@ -577,7 +896,8 @@ static void ircomm_state_idle( struct ircomm_cb *self, IRCOMM_EVENT event,
*/
static void ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb ){
+ struct sk_buff *skb )
+{
switch(event){
case TTP_CONNECT_CONFIRM:
ircomm_next_state(self, COMM_CONN);
@@ -596,7 +916,7 @@ static void ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
/* ircomm_next_state(self, COMM_IDLE); */
/* break; */
default:
- DEBUG(0,"ircomm_state_waiti:unknown event =%d(%s)\n",
+ DEBUG(0,__FUNCTION__"():unknown event =%d(%s)\n",
event, ircommevent[event]);
}
}
@@ -607,7 +927,8 @@ static void ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event,
* ircomm_state_waitr
*/
static void ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb ) {
+ struct sk_buff *skb )
+{
switch(event){
case IRCOMM_CONNECT_RESPONSE:
@@ -615,8 +936,7 @@ static void ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
/* issue_connect_response */
if(self->servicetype==THREE_WIRE_RAW){
- DEBUG(0,"ircomm:issue_connect_response:"
- "THREE_WIRE_RAW is not implemented!\n");
+ DEBUG(0,__FUNCTION__"():3WIRE_RAW is not implemented\n");
/* irlmp_connect_response(Vpeersap,
* ACCEPT,null);
*/
@@ -629,6 +949,7 @@ static void ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
case IRCOMM_DISCONNECT_REQUEST:
ircomm_next_state(self, COMM_IDLE);
issue_disconnect_request(self, skb);
+ queryias_done(self);
break;
case TTP_DISCONNECT_INDICATION:
@@ -636,6 +957,19 @@ static void ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
disconnect_indication(self, skb);
break;
+ case DISCOVERY_INDICATION:
+ DEBUG(0, __FUNCTION__"():DISCOVERY_INDICATION\n");
+ queryias_done(self);
+ break;
+ case GOT_PARAMETERS:
+ DEBUG(0, __FUNCTION__"():GOT_PARAMETERS\n");
+ queryias_done(self);
+ break;
+ case GOT_LSAPSEL:
+ DEBUG(0, __FUNCTION__"():GOT_LSAPSEL\n");
+ queryias_done(self);
+ break;
+
/* case LMP_DISCONNECT_INDICATION: */
/* disconnect_indication(); */
/* ircomm_next_state(self, COMM_IDLE); */
@@ -651,23 +985,20 @@ static void ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
*/
static void ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
- struct sk_buff *skb ){
+ struct sk_buff *skb )
+{
switch(event){
case TTP_DATA_INDICATION:
process_data(self, skb);
- /* stay CONN state*/
break;
case IRCOMM_DATA_REQUEST:
issue_data_request(self, skb);
- /* stay CONN state*/
break;
/* case LMP_DATA_INDICATION: */
/* ircomm_data_indicated(); */
-/* stay CONN state */
/* break; */
case IRCOMM_CONTROL_REQUEST:
issue_control_request(self, skb);
- /* stay CONN state*/
break;
case TTP_DISCONNECT_INDICATION:
ircomm_next_state(self, COMM_IDLE);
@@ -676,11 +1007,26 @@ static void ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
case IRCOMM_DISCONNECT_REQUEST:
ircomm_next_state(self, COMM_IDLE);
issue_disconnect_request(self, skb);
+ queryias_done(self);
break;
/* case LM_DISCONNECT_INDICATION: */
/* disconnect_indication(); */
/* ircomm_next_state(self, COMM_IDLE); */
/* break; */
+
+ case DISCOVERY_INDICATION:
+ DEBUG(0, __FUNCTION__"():DISCOVERY_INDICATION\n");
+ queryias_done(self);
+ break;
+ case GOT_PARAMETERS:
+ DEBUG(0, __FUNCTION__"():GOT_PARAMETERS\n");
+ queryias_done(self);
+ break;
+ case GOT_LSAPSEL:
+ DEBUG(0, __FUNCTION__"():GOT_LSAPSEL\n");
+ queryias_done(self);
+ break;
+
default:
DEBUG(0,"ircomm_state_conn:unknown event =%d(%s)\n",
event, ircommevent[event]);
@@ -688,16 +1034,116 @@ static void ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
}
+
/*
* ----------------------------------------------------------------------
- * ircomm requests
+ * IrCOMM service interfaces and supporting functions
*
* ----------------------------------------------------------------------
*/
+/*
+ * start_discovering()
+ *
+ * start discovering and enter DISCOVERY_WAIT state
+ */
+
+static void start_discovering(struct ircomm_cb *self)
+{
+ __u16 hints;
+ ASSERT( self != NULL, return;);
+ ASSERT( self->magic == IRCOMM_MAGIC, return;);
+ DEBUG(4,__FUNCTION__"():servicetype = %d\n",self->servicetype);
+
+
+ hints = irlmp_service_to_hint(S_COMM);
+
+ DEBUG(0,__FUNCTION__"():start discovering..\n");
+ switch (ircomm_cs) {
+ case 0:
+ MOD_INC_USE_COUNT;
+ self->queryias_lock = 1;
+ discovering_instance = self;
+ self->skey = irlmp_register_service(hints);
+ self->ckey = irlmp_register_client(hints, ircomm_discovery_indication,
+ NULL);
+ break;
+
+ case 1: /* client only */
+ MOD_INC_USE_COUNT;
+ self->queryias_lock = 1;
+ discovering_instance = self;
+ DEBUG( 0, __FUNCTION__"():client only mode\n");
+ self->ckey = irlmp_register_client(hints, ircomm_discovery_indication,
+ NULL);
+ break;
+
+ case 2: /* server only */
+ default:
+ DEBUG( 0, __FUNCTION__"():server only mode\n");
+ self->skey = irlmp_register_service(hints);
+ discovering_instance = NULL;
+ break;
+ }
+
+ return;
+}
+
+/*
+ * queryias_done(self)
+ *
+ * called when discovery process got wrong results, completed, or terminated.
+ */
+
+static void queryias_done(struct ircomm_cb *self)
+{
+ DEBUG(0, __FUNCTION__"():\n");
+ if(self->queryias_lock){
+ self->queryias_lock = 0;
+ discovering_instance = NULL;
+ MOD_DEC_USE_COUNT;
+ irlmp_unregister_client(self->ckey);
+ }
+ if(ircomm_cs != 1)
+ irlmp_unregister_service(self->skey);
+ return;
+}
+
+
+
+static void query_parameters(struct ircomm_cb *self)
+{
+
+ DEBUG(0, __FUNCTION__"():querying IAS: Parameters..\n");
+ iriap_getvaluebyclass_request( "IrDA:IrCOMM", "Parameters",
+ self->saddr, self->daddr,
+ ircomm_getvalue_confirm, self );
+}
+
+
+static void query_lsapsel(struct ircomm_cb * self)
+{
+ DEBUG(0, __FUNCTION__"():querying IAS: Lsapsel...\n");
-void ircomm_connect_request(struct ircomm_cb *self, int maxsdusize){
+ if (!(self->servicetype & THREE_WIRE_RAW)) {
+ iriap_getvaluebyclass_request(
+ "IrDA:IrCOMM", "IrDA:TinyTP:LsapSel",
+ self->saddr, self->daddr,
+ ircomm_getvalue_confirm, self );
+ } else {
+ DEBUG(0, __FUNCTION__ "THREE_WIRE_RAW is not implemented!\n");
+ }
+}
+/*
+ * ircomm_connect_request()
+ * The implementation of this function differs from the one in the reference.
+ * This function does discovery as well as sending the connect request.
+ */
+
+
+void ircomm_connect_request(struct ircomm_cb *self, __u8 servicetype)
+{
/*
* TODO:build a packet which contains "initial control parameters"
* and send it with connect_request
@@ -706,14 +1152,19 @@ void ircomm_connect_request(struct ircomm_cb *self, int maxsdusize){
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRCOMM_MAGIC, return;);
- DEBUG(0,"ircomm_connect_request:\n");
- self->maxsdusize = maxsdusize;
+ DEBUG(0, __FUNCTION__"():sending connect_request...\n");
+
+ self->servicetype= servicetype;
+ /* ircomm_control_request(self, SERVICETYPE); */ /*servictype*/
+
+ self->maxsdusize = SAR_DISABLE;
ircomm_do_event( self, IRCOMM_CONNECT_REQUEST, NULL);
}
void ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata,
- int maxsdusize){
+ __u32 maxsdusize)
+{
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRCOMM_MAGIC, return;);
@@ -729,11 +1180,8 @@ void ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata,
if(!userdata){
/* FIXME: check for errors and initialize? DB */
userdata = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
- if(!userdata){
- DEBUG(0, __FUNCTION__"alloc_skb failed\n");
- return;
- }
- IS_SKB(userdata, return;);
+ ASSERT(userdata != NULL, return;);
+
skb_reserve(userdata,COMM_HEADER_SIZE);
}
@@ -746,31 +1194,73 @@ void ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata,
ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata);
}
-void ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata){
+void ircomm_disconnect_request(struct ircomm_cb *self,
+ struct sk_buff *userdata,
+ int priority)
+{
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRCOMM_MAGIC, return;);
+ DEBUG(0,__FUNCTION__"()\n");
+
+#if 0
+ /* unregister layer */
+ switch (ircomm_cs) {
+ case 1: /* client only */
+ irlmp_unregister_client(ckey);
+ break;
+
+ case 2: /* server only */
+ irlmp_unregister_service(skey);
+ break;
+ case 0:
+ default:
+ irlmp_unregister_client(ckey);
+ irlmp_unregister_service(skey);
+ break;
+ }
+#endif
+
+ self->disconnect_priority = priority;
+ if(priority != P_HIGH)
+ self->disconnect_priority = P_NORMAL;
- DEBUG(0,"ircomm_disconnect_request\n");
- ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL);
+ ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, userdata);
}
-void ircomm_data_request(struct ircomm_cb *self, struct sk_buff *userdata){
+int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *userdata)
+{
+ __u8 * frame;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRCOMM_MAGIC, return;);
- ASSERT( userdata != NULL, return;);
+ DEBUG(4,__FUNCTION__"()\n");
+ ASSERT( self != NULL, return -EFAULT;);
+ ASSERT( self->magic == IRCOMM_MAGIC, return -EFAULT;);
+ ASSERT( userdata != NULL, return -EFAULT;);
+
if(self->state != COMM_CONN){
- DEBUG(4,"ignore IRCOMM_DATA_REQUEST:not connected\n");
- if(userdata)
- dev_kfree_skb(userdata);
- return;
+ DEBUG(4,__FUNCTION__"():not connected, data is ignored\n");
+ return -EINVAL;
}
- DEBUG(4,"ircomm_data_request\n");
+ if(self->ttp_stop)
+ return -EBUSY;
+
+ if(self->control_ch_pending){
+ /* send control_channel */
+ ircomm_tx_controlchannel(self);
+ }
+
+ if(self->ttp_stop)
+ return -EBUSY;
+
+ /* add "clen" field */
+ frame = skb_push(userdata,1);
+ frame[0]=0; /* without control channel */
+
ircomm_do_event(self, IRCOMM_DATA_REQUEST, userdata);
+ return 0;
}
/*
@@ -780,19 +1270,25 @@ void ircomm_data_request(struct ircomm_cb *self, struct sk_buff *userdata){
* ----------------------------------------------------------------------
*/
-static void ircomm_tx_ctrlbuffer(struct ircomm_cb *self ){
+
+static void ircomm_tx_controlchannel(struct ircomm_cb *self )
+{
__u8 clen;
struct sk_buff *skb = self->ctrl_skb;
- DEBUG(4,"ircomm_tx_ctrlbuffer:\n");
+ DEBUG(4,__FUNCTION__"()\n");
+ /* 'self' should have been checked */
+ ASSERT(!self->ttp_stop, return ;);
+ ASSERT(self->state == COMM_CONN, return ;);
/* add "clen" field */
clen=skb->len;
- if(clen){
- skb_push(skb,1);
- skb->data[0]=clen;
+ ASSERT(clen != 0,return;);
+
+ skb_push(skb,1);
+ skb->data[0]=clen;
#if 0
printk("tx_ctrl:");
@@ -803,145 +1299,106 @@ static void ircomm_tx_ctrlbuffer(struct ircomm_cb *self ){
printk("\n");
}
#endif
-
- ircomm_do_event(self, IRCOMM_CONTROL_REQUEST, skb);
-
- skb = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
- if (skb==NULL){
- DEBUG(0,"ircomm_tx_ctrlbuffer:alloc_skb failed!\n");
- return;
- }
- skb_reserve(skb,COMM_HEADER_SIZE);
- self->ctrl_skb = skb;
- }
-}
-
-
-void ircomm_control_request(struct ircomm_cb *self){
- struct sk_buff *skb;
+ ircomm_do_event(self, IRCOMM_CONTROL_REQUEST, skb);
+ self->control_ch_pending = 0;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRCOMM_MAGIC, return;);
+ skb = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ ASSERT(skb != NULL, return ;);
- DEBUG(0, "ircomm_control_request:\n");
-
- if(self->ttp_stop || self->state != COMM_CONN){
- DEBUG(0,"ircomm_control_request:can't send it.. ignore it\n");
- return;
- }
-
- skb = self->ctrl_skb;
- IS_SKB(skb,return;);
-
- if(skb->len)
- ircomm_tx_ctrlbuffer(self);
+ skb_reserve(skb,COMM_HEADER_SIZE);
+ self->ctrl_skb = skb;
}
-static void append_tuple(struct ircomm_cb *self,
- __u8 instruction, __u8 pl , __u8 *value){
-
+static void append_tuple(struct ircomm_cb *self, __u8 instruction, __u8 pl ,
+ __u8 *value)
+{
__u8 *frame;
struct sk_buff *skb;
- int i,c;
+ int i,c = 0;
+ unsigned long flags;
+
+ save_flags(flags);cli();
skb = self->ctrl_skb;
ASSERT(skb != NULL, return;);
- IS_SKB(skb,return;);
- /*if there is little room in the packet... */
-
- if(skb->len > COMM_DEFAULT_DATA_SIZE - COMM_HEADER_SIZE - (pl+2)){
- if(!self->ttp_stop && self->state == COMM_CONN){
-
- /* send a packet if we can */
- ircomm_tx_ctrlbuffer(self);
- skb = self->ctrl_skb;
- } else {
- DEBUG(0, "ircomm_append_ctrl:there's no room.. ignore it\n");
-
- /* TODO: we have to detect whether we have to resend some
- information after ttp_stop is cleared */
-
- /* self->resend_ctrl = 1; */
- return;
- }
+ if(skb_tailroom(skb) < (pl+2)){
+ DEBUG(0, __FUNCTION__"there's no room.. ignore it\n");
+ self->ignored_control_tuples++;
+ restore_flags(flags);
+ return;
}
frame = skb_put(skb,pl+2);
- c = 0;
frame[c++] = instruction; /* PI */
frame[c++] = pl; /* PL */
for(i=0; i < pl ; i++)
frame[c++] = *value++; /* PV */
-
+ restore_flags(flags);
+ self->pending_control_tuples++;
+ self->control_ch_pending = 1;
}
/*
- * ircomm_append_ctrl();
+ * ircomm_control_request();
* this function is exported as a request to send some control-channel tuples
* to peer device
*/
-void ircomm_append_ctrl(struct ircomm_cb *self, __u8 instruction){
+void ircomm_control_request(struct ircomm_cb *self, __u8 instruction)
+{
- __u8 pv[70];
+ __u8 pv[32]; /* 32 max, for PORT_NAME */
__u8 *value = &pv[0];
__u32 temp;
+ int notsupp=0;
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRCOMM_MAGIC, return;);
- if(self->state != COMM_CONN)
- return;
-
if(self->servicetype == THREE_WIRE_RAW){
- DEBUG(0,"THREE_WIRE_RAW shuold not use me!\n");
+ DEBUG(0,__FUNCTION__"():THREE_WIRE_RAW shuold not use me!\n");
return;
}
- DEBUG(4,"ircomm_append_ctrl:\n");
+ DEBUG(4,__FUNCTION__"()\n");
/* find parameter and its length */
- switch(instruction){
+ if(self->servicetype == THREE_WIRE) goto threewire;
+ if(self->servicetype == NINE_WIRE) goto ninewire;
- case POLL_FOR_LINE_SETTINGS:
- case STATUS_QUERY:
+
+ /* FIXME: centronics service is not fully implemented yet*/
+ switch(instruction){
case IEEE1284_MODE_SUPPORT:
case IEEE1284_DEVICEID:
append_tuple(self,instruction,0,NULL);
break;
-
- case SERVICETYPE:
- value[0] = self->servicetype;
- append_tuple(self,instruction,1,value);
+ case STATUS_QUERY:
+ append_tuple(self,instruction,0,NULL);
break;
- case DATA_FORMAT:
- value[0] = self->data_format;
+ case SET_BUSY_TIMEOUT:
+ value[0] = self->busy_timeout;
append_tuple(self,instruction,1,value);
break;
- case FLOW_CONTROL:
- if(self->null_modem_mode){
+ case IEEE1284_ECP_EPP_DATA_TRANSFER:
+ value[0] = self->ecp_epp_mode;
+ value[1] = self->channel_or_addr;
+ append_tuple(self,instruction,2,value);
+ break;
+ default:
+ notsupp=1;
+ }
- /* inside out */
- value[0] = (self->flow_ctrl & 0x55) << 1;
- value[0] |= (self->flow_ctrl & 0xAA) >> 1;
- }else{
- value[0] = self->flow_ctrl;
- }
- append_tuple(self,instruction,1,value);
- break;
- case LINESTATUS:
- value[0] = self->line_status;
- append_tuple(self,instruction,1,value);
- break;
- case BREAK_SIGNAL:
- value[0] = self->break_signal;
- append_tuple(self,instruction,1,value);
+ ninewire:
+ switch(instruction){
+ case POLL_FOR_LINE_SETTINGS:
+ append_tuple(self,instruction,0,NULL);
break;
case DTELINE_STATE:
if(self->null_modem_mode){
@@ -972,8 +1429,42 @@ void ircomm_append_ctrl(struct ircomm_cb *self, __u8 instruction){
value[0] = self->dce;
append_tuple(self,instruction,1,value);
break;
- case SET_BUSY_TIMEOUT:
- value[0] = self->busy_timeout;
+
+ default:
+ notsupp=1;
+ }
+
+ threewire:
+ switch(instruction){
+
+ case SERVICETYPE:
+ value[0] = self->servicetype;
+ append_tuple(self,instruction,1,value);
+ break;
+
+ case DATA_FORMAT:
+ value[0] = self->data_format;
+ append_tuple(self,instruction,1,value);
+ break;
+
+ case FLOW_CONTROL:
+ if(self->null_modem_mode){
+ /* inside out */
+ value[0] = (self->flow_ctrl & 0x55) << 1;
+ value[0] |= (self->flow_ctrl & 0xAA) >> 1;
+ }else{
+ value[0] = self->flow_ctrl;
+ }
+ append_tuple(self,instruction,1,value);
+ break;
+
+ case LINESTATUS:
+ value[0] = self->line_status;
+ append_tuple(self,instruction,1,value);
+ break;
+
+ case BREAK_SIGNAL:
+ value[0] = self->break_signal;
append_tuple(self,instruction,1,value);
break;
@@ -989,12 +1480,6 @@ void ircomm_append_ctrl(struct ircomm_cb *self, __u8 instruction){
append_tuple(self,instruction,2,value);
break;
- case IEEE1284_ECP_EPP_DATA_TRANSFER:
- value[0] = self->ecp_epp_mode;
- value[1] = self->channel_or_addr;
- append_tuple(self,instruction,2,value);
- break;
-
case DATA_RATE:
temp = self->data_rate;
value[3] = (__u8)((temp >> 24) & 0x000000ff);
@@ -1003,202 +1488,300 @@ void ircomm_append_ctrl(struct ircomm_cb *self, __u8 instruction){
value[0] = (__u8)(temp & 0x000000ff);
append_tuple(self,instruction,4,value);
break;
-
#if 0
case PORT_NAME:
case FIXED_PORT_NAME:
temp = strlen(&self->port_name);
- if(temp < 70){
+ if(temp < 32){
value = (__u8) (self->port_name);
append_tuple(self,instruction,temp,value);
- }
- break;
+ }else
+ DEBUG(0,__FUNCTION__"() PORT_NAME:too long\n");
#endif
-
-/* TODO: control tuples for centronics emulation is not implemented */
-/* case IEEE1284_MODE: */
+ break;
 	default:
-		DEBUG(0,"ircomm_append_ctrl:instruction(0x%02x)is not"
-		      "implemented\n",instruction);
+		if(notsupp)
+			DEBUG(0,__FUNCTION__"():instruction(0x%02x) is not "
+			      "implemented\n",instruction);
}
}
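/*
 * Editorial sketch, not part of the patch: a typical client of the new
 * ircomm_control_request() interface updates the relevant fields of its
 * ircomm_cb and then queues one tuple per parameter, much as the tty
 * emulation layer does on connect later in this patch.  The helper name
 * example_send_initial_params() is an assumption used only for
 * illustration.
 */
static void example_send_initial_params(struct ircomm_cb *comm)
{
	/* raise DTR/RTS before announcing the DTE line state */
	comm->dte |= (MCR_DTR | MCR_RTS | DELTA_DTR | DELTA_RTS);

	ircomm_control_request(comm, SERVICETYPE);
	ircomm_control_request(comm, DATA_FORMAT);
	ircomm_control_request(comm, FLOW_CONTROL);
	ircomm_control_request(comm, DTELINE_STATE);
}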
-static void ircomm_parse_control(struct ircomm_cb *self,
- struct sk_buff *skb,
- int type){
+void ircomm_parse_tuples(struct ircomm_cb *self, struct sk_buff *skb, int type)
+{
__u8 *data;
- __u8 pi,pl,pv[64];
+ __u8 pi,plen;
int clen = 0;
- int i,indicate,count = 0;
+ int indicate=0;
-
- data = skb->data;
- if(type == IAS_PARAM)
- clen = ((data[count++] << 8) & data[count++]); /* MSB first */
- else /* CONTROL_CHANNEL */
- clen = data[count++];
+ ASSERT(skb != NULL, return;);
+ ASSERT(self != NULL, return ;);
+ ASSERT(self->magic == IRCOMM_MAGIC, return ;);
- if(clen == 0){
- skb_pull( skb, 1); /* remove clen field */
- return;
+#ifdef IRCOMM_DEBUG_TUPLE
+ DEBUG(0, __FUNCTION__"():tuple sequence is:\n");
+ {
+ int i;
+ for ( i=0;i< skb->len;i++)
+ printk("%02x", (__u8)(skb->data[i]));
+ printk("\n");
}
+#endif
+ data = skb->data;
+ if(type == IAS_PARAM)
+ {
+ clen = (data[0] << 8) & 0xff00;
+ clen |= data[1] & 0x00ff;
+ ASSERT( clen <= (skb->len - 2) && clen <= 1024, goto corrupted;);
+ DEBUG(4, __FUNCTION__"():IAS_PARAM len = %d\n",clen );
+ skb_pull( skb, 2);
+ }
+ else
+ {
+ /* CONTROL_CHANNEL */
+ clen = data[0];
+ ASSERT( clen < skb->len, goto corrupted;);
+ DEBUG(4, __FUNCTION__"():CONTROL_CHANNEL:len = %d\n",clen );
+ skb_pull( skb, 1);
+ }
+ while( clen >= 2 ){
+ data = skb->data;
+ indicate = 0;
-
- while( count < clen ){
/*
* parse controlparameters and set value into structure
*/
- pi = data[count++];
- pl = data[count++];
-
- DEBUG(0, "parse_control:instruction(0x%02x)\n",pi) ;
+ pi = data[0];
+ plen = data[1];
+ ASSERT( clen >= 2+plen, goto corrupted; );
+ DEBUG(4, __FUNCTION__"():instruction=0x%02x,len=%d\n",
+ pi, plen) ;
- /* copy a tuple into pv[] */
-#ifdef IRCOMM_DEBUG_TUPLE
- printk("data:");
- for(i=0; i < pl; i++){
- pv[i] = data[count++];
- printk("%02x",pv[i]);
- }
- printk("\n");
-#else
- for(i=0; i < pl; i++)
- pv[i] = data[count++];
-#endif
+ switch(pi)
+ {
+ case POLL_FOR_LINE_SETTINGS:
+ ircomm_control_request(self, DTELINE_STATE);
+ break;
-
- /* parse pv */
- indicate = 0;
-
- switch(pi){
-
- /*
- * for 3-wire/9-wire/centronics
- */
-
case SERVICETYPE:
- self->peer_servicetype = pv[0];
+ self->peer_servicetype = data[2];
break;
+
case PORT_TYPE:
- self->peer_port_type = pv[0];
- break;
-#if 0
- case PORT_NAME:
- self->peer_port_name = *pv;
- break;
- case FIXED_PORT_NAME:
- self->peer_port_name = *pv;
- /*
- * We should not connect if user of IrCOMM can't
- * recognize the port name
- */
- self->port_name_critical = TRUE;
- break;
-#endif
- case DATA_RATE:
- self->peer_data_rate = (pv[3]<<24) & (pv[2]<<16)
- & (pv[1]<<8) & pv[0];
- indicate = 1;
+ self->peer_port_type = data[2];
break;
+
case DATA_FORMAT:
- self->peer_data_format = pv[0];
+ self->peer_data_format = data[2];
break;
+
case FLOW_CONTROL:
- self->peer_flow_ctrl = pv[0];
- indicate = 1;
- break;
- case XON_XOFF_CHAR:
- self->peer_xon_char = pv[0];
- self->peer_xoff_char = pv[1];
- indicate = 1;
- break;
- case ENQ_ACK_CHAR:
- self->peer_enq_char = pv[0];
- self->peer_ack_char = pv[1];
+ self->peer_flow_ctrl = data[2];
indicate = 1;
break;
+
case LINESTATUS:
- self->peer_line_status = pv[0];
+ self->peer_line_status = data[2];
indicate = 1;
break;
+
case BREAK_SIGNAL:
- self->peer_break_signal = pv[0];
+ self->peer_break_signal = data[2];
/* indicate = 1; */
break;
-
- /*
- * for 9-wire
- */
+
+ case DCELINE_STATE:
+ self->peer_dce = data[2];
+ indicate = 1;
+ break;
case DTELINE_STATE:
if(self->null_modem_mode){
/* input DTR as {DSR & CD & RI} */
self->peer_dce = 0;
- if(pv[0] & DELTA_DTR)
- self->peer_dce |= DELTA_DSR|DELTA_RI|DELTA_DCD;
- if(pv[0] & MCR_DTR)
- self->peer_dce |= MSR_DSR|MSR_RI|MSR_DCD;
-
+ if(data[2] & DELTA_DTR)
+ self->peer_dce |= (DELTA_DSR|
+ DELTA_RI|
+ DELTA_DCD);
+ if(data[2] & MCR_DTR)
+ self->peer_dce |= (MSR_DSR|
+ MSR_RI|
+ MSR_DCD);
/* rts as cts */
- if(pv[0] & DELTA_RTS)
+ if(data[2] & DELTA_RTS)
self->peer_dce |= DELTA_CTS;
- if(pv[0] & MCR_RTS)
+ if(data[2] & MCR_RTS)
self->peer_dce |= MSR_CTS;
}else{
- self->peer_dte = pv[0];
+ self->peer_dte = data[2];
}
indicate = 1;
break;
+
+ case XON_XOFF_CHAR:
+ self->peer_xon_char = data[2];
+ self->peer_xoff_char = data[3];
+ indicate = 1;
+ break;
- case DCELINE_STATE:
- self->peer_dce = pv[0];
+ case ENQ_ACK_CHAR:
+ self->peer_enq_char = data[2];
+ self->peer_ack_char = data[3];
indicate = 1;
break;
- case POLL_FOR_LINE_SETTINGS:
- ircomm_append_ctrl(self, DTELINE_STATE);
- ircomm_control_request(self);
+ case DATA_RATE:
+			self->peer_data_rate = ( data[5]<<24
+						 | data[4]<<16
+						 | data[3]<<8
+						 | data[2]);
+ indicate = 1;
break;
- /*
- * for centronics .... not implemented yet
+ case PORT_NAME:
+ ASSERT(plen <= 32 , goto corrupted;);
+ memcpy(self->port_name, data + 2, plen);
+ *(__u8 *)(self->port_name+plen) = 0;
+ break;
+
+ case FIXED_PORT_NAME:
+ ASSERT(plen <= 32 , goto corrupted;);
+ memcpy(self->port_name, data + 2, plen);
+ *(__u8 *)(self->port_name+plen) = 0;
+ /*
+ * We should not connect if user of IrCOMM can't
+ * recognize the port name
*/
-/* case STATUS_QUERY: */
-/* case SET_BUSY_TIMEOUT: */
-/* case IEEE1284_MODE_SUPPORT: */
-/* case IEEE1284_DEVICEID: */
-/* case IEEE1284_MODE: */
-/* case IEEE1284_ECP_EPP_DATA_TRANSFER: */
-
- default:
- DEBUG(0, "ircomm_parse_control:not implemented "
- "instruction(%d)\n", pi);
+ self->port_name_critical = TRUE;
break;
+
+ default:
+ DEBUG(0, __FUNCTION__
+ "():not implemented (PI=%d)\n", pi);
}
- if(indicate && self->notify.flow_indication
- && type == CONTROL_CHANNEL){
-
- DEBUG(0,"ircomm:parse_control:indicating..:\n");
+
+ if(indicate &&
+ self->notify.flow_indication && type == CONTROL_CHANNEL)
+ {
+ DEBUG(4,__FUNCTION__":indicating..:\n");
self->pi = pi;
if(self->notify.flow_indication)
- self->notify.flow_indication(self->notify.instance, self, 0);
- indicate = 0;
+ self->notify.flow_indication(self->notify.instance,
+ self,
+ CONTROL_CHANNEL);
}
+ skb_pull(skb, 2+plen);
+ clen -= (2+plen);
}
- skb_pull( skb, 1+clen);
+
return;
+
+ corrupted:
+ skb_pull(skb, skb->len); /* remove suspicious data */
+ return;
+}
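/*
 * Editorial sketch, not part of the patch: the control channel built by
 * append_tuple() above and consumed by ircomm_parse_tuples() is a single
 * clen octet followed by PI/PL/PV tuples.  The array below shows one
 * possible encoding of a DTELINE_STATE tuple (PL = 1) followed by a
 * DATA_RATE tuple (PL = 4, value sent least significant octet first,
 * here 9600 bps); the array name and the 0x03 PV byte are illustrative
 * assumptions only.
 */
static const __u8 example_control_channel[] = {
	0x09,				/* clen: nine octets of tuples follow */
	DTELINE_STATE, 0x01, 0x03,	/* PI, PL, PV                         */
	DATA_RATE,     0x04,		/* PI, PL                             */
	0x80, 0x25, 0x00, 0x00		/* PV: 9600, LSB first                */
};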
+
+/*
+ * ----------------------------------------------------------------------
+ * Function ircomm_open_instance() ,ircomm_close_instance() and friends
+ *
+ * ircomm_open_instance allocates a free ircomm instance, stores the
+ * client's callbacks and opens a TSAP; the client then issues a
+ * connect request
+ * ----------------------------------------------------------------------
+ */
+
+
+
+struct ircomm_cb * ircomm_open_instance( struct notify_t client_notify)
+{
+ int i;
+ struct ircomm_cb *self = NULL;
+ struct notify_t notify;
+ unsigned long flags;
+
+ ASSERT(ircomm != NULL,return NULL;);
+ DEBUG(0,__FUNCTION__"():\n");
+
+ /* find free handle */
+
+ save_flags(flags);
+ cli();
+ for(i = 0; i < IRCOMM_MAX_CONNECTION; i++){
+ ASSERT(ircomm[i] != NULL,return(NULL););
+ if(!ircomm[i]->in_use){
+ self = ircomm[i];
+ break;
+ }
+ }
+
+	if (!self){
+		DEBUG(0,__FUNCTION__"():no free handle!\n");
+		restore_flags(flags);
+		return (NULL);
+	}
+
+ self->in_use = 1;
+ restore_flags(flags);
+
+ self->notify = client_notify;
+ self->ttp_stop = 0;
+ self->control_ch_pending = 0;
+
+ /* register callbacks */
+
+ irda_notify_init(&notify);
+ notify.data_indication = ircomm_accept_data_indication;
+ notify.connect_confirm = ircomm_accept_connect_confirm;
+ notify.connect_indication = ircomm_accept_connect_indication;
+ notify.flow_indication = ircomm_accept_flow_indication;
+ notify.disconnect_indication = ircomm_accept_disconnect_indication;
+ notify.instance = self;
+ strncpy( notify.name, "IrCOMM", NOTIFY_MAX_NAME);
+
+ self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT,
+ &notify);
+	if(!self->tsap){
+		DEBUG(0,__FUNCTION__"(): failed to allocate tsap\n");
+		self->in_use = 0;
+		return NULL;
+	}
+
+ ircomm_next_state(self, COMM_IDLE);
+ return (self);
}
+int ircomm_close_instance(struct ircomm_cb *self)
+{
+ ASSERT( self != NULL, return -EIO;);
+ ASSERT( self->magic == IRCOMM_MAGIC, return -EIO;);
+ ASSERT( self->ctrl_skb != NULL, return -EIO;);
+
+ DEBUG(0,__FUNCTION__"()\n");
+
+ /* shutdown ircomm layer */
+ if(self->state != COMM_IDLE && self->state != COMM_WAITI)
+ {
+ DEBUG(0,__FUNCTION__"():force disconnecting..\n");
+ ircomm_disconnect_request(self, NULL, P_HIGH);
+ }
+
+ skb_trim(self->ctrl_skb,0);
+ /* remove a tsap */
+ if(self->tsap)
+ irttp_close_tsap(self->tsap);
+ self->tsap = NULL;
+ self->in_use = 0;
+ return 0;
+}
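/*
 * Editorial sketch, not part of the patch: a client such as the tty
 * emulation layer below attaches to this layer roughly as follows.  The
 * example_* names and the minimal callback are assumptions used only for
 * illustration; a real client installs all five notify callbacks.
 */
static struct ircomm_cb *example_comm;

static int example_data_indication(void *instance, void *sap,
				   struct sk_buff *skb)
{
	dev_kfree_skb(skb);	/* a real client would consume the data */
	return 0;
}

static int example_attach(void)
{
	struct notify_t notify;

	irda_notify_init(&notify);
	notify.data_indication = example_data_indication;
	notify.instance = NULL;
	strncpy(notify.name, "example", NOTIFY_MAX_NAME);

	example_comm = ircomm_open_instance(notify);
	if (!example_comm)
		return -ENODEV;

	/* start discovery and connect as NINE_WIRE service */
	ircomm_connect_request(example_comm, NINE_WIRE);
	return 0;
}

static void example_detach(void)
{
	if (example_comm)
		ircomm_close_instance(example_comm);
	example_comm = NULL;
}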
+
+
/*
* ----------------------------------------------------------------------
* Function init_module(void) ,cleanup_module()
@@ -1209,20 +1792,21 @@ static void ircomm_parse_control(struct ircomm_cb *self,
*/
#ifdef MODULE
+int init_module(void)
+{
+ int err;
-int init_module(void) {
- ircomm_init();
+ err = ircomm_init();
- DEBUG( 4, "ircomm:init_module:done\n");
- return 0;
+ DEBUG( 4, __FUNCTION__"():done.\n");
+ return err;
}
void cleanup_module(void)
{
ircomm_cleanup();
- DEBUG( 0, "ircomm_common:cleanup_module:done.\n");
+ DEBUG( 4, __FUNCTION__"():done.\n");
}
-
#endif /* MODULE */
/************************************************************
@@ -1238,53 +1822,92 @@ void cleanup_module(void)
*
*/
int ircomm_proc_read(char *buf, char **start, off_t offset,
- int len, int unused){
+ int len, int unused)
+{
int i, index;
len = 0;
for (i=0; i<IRCOMM_MAX_CONNECTION; i++) {
+ len += sprintf(buf+len, "instance %d:\n",i);
+ if(ircomm[i]->in_use == 0){
+ len += sprintf(buf+len, "\tunused\n");
+ continue;
+ }
+
if (ircomm[i] == NULL || ircomm[i]->magic != IRCOMM_MAGIC) {
- len += sprintf(buf+len, "???\t");
- }else {
- switch (ircomm[i]->servicetype) {
- case UNKNOWN:
- index = 0;
- break;
- case THREE_WIRE_RAW:
- index = 1;
- break;
- case THREE_WIRE:
- index = 2;
- break;
- case NINE_WIRE:
- index = 3;
- break;
- case CENTRONICS:
- index = 4;
- break;
- default:
- index = 0;
- break;
- }
- len += sprintf(buf+len, "service: %s\t",
- ircommservicetype[index]);
- if(index){
- len += sprintf(buf+len, "porttype: %s ",
- ircommporttype[ircomm[i]->port_type]);
- len += sprintf(buf+len, "state: %s ",
- ircommstate[ircomm[i]->state]);
- len += sprintf(buf+len, "user: %s ",
- ircomm[i]->notify.name);
- len += sprintf(buf+len, "nullmodem emulation: %s",
- (ircomm[i]->null_modem_mode ? "yes":"no"));
- }
+ len += sprintf(buf+len, "\tbroken???\n");
+ continue;
+ }
+
+ switch (ircomm[i]->servicetype) {
+ case UNKNOWN:
+ index = 0;
+ break;
+ case THREE_WIRE_RAW:
+ index = 1;
+ break;
+ case THREE_WIRE:
+ index = 2;
+ break;
+ case NINE_WIRE:
+ index = 3;
+ break;
+ case CENTRONICS:
+ index = 4;
+ break;
+ default:
+ index = 0;
+ break;
}
- len += sprintf(buf+len, "\n");
+ len += sprintf(buf+len, " service: %s ",
+ ircommservicetype[index]);
+ if(!index)
+ continue;
+
+ len += sprintf(buf+len, "porttype: %s ",
+ ircommporttype[ircomm[i]->port_type]);
+ len += sprintf(buf+len, "state: %s ",
+ ircommstate[ircomm[i]->state]);
+ len += sprintf(buf+len, "user: %s\n",
+ ircomm[i]->notify.name);
+
+ len += sprintf(buf+len, " tx packets: %d ",
+ ircomm[i]->tx_packets);
+ len += sprintf(buf+len, "rx packets: %d ",
+ ircomm[i]->rx_packets);
+ len += sprintf(buf+len, "tx controls: %d\n",
+ ircomm[i]->tx_controls);
+
+ len += sprintf(buf+len, " pending tuples: %d ",
+ ircomm[i]->pending_control_tuples);
+ len += sprintf(buf+len, " ignored tuples: %d\n",
+ ircomm[i]->ignored_control_tuples);
+
+ len += sprintf(buf+len, " nullmodem emulation: %s ",
+ (ircomm[i]->null_modem_mode ? "yes":"no"));
+ len += sprintf(buf+len, "IrTTP: %s\n",
+ (ircomm[i]->ttp_stop ? "BUSY":"READY"));
+
+ len += sprintf(buf+len, " Peer capability: ");
+ if(ircomm[i]->peer_cap & THREE_WIRE_RAW)
+ len += sprintf(buf+len, "3wire-raw ");
+ if(ircomm[i]->peer_cap & THREE_WIRE)
+ len += sprintf(buf+len, "3wire ");
+ if(ircomm[i]->peer_cap & NINE_WIRE)
+ len += sprintf(buf+len, "9wire ");
+ if(ircomm[i]->peer_cap & CENTRONICS)
+ len += sprintf(buf+len, "centronics");
+
+ len += sprintf(buf+len, "\n Port name: %s\n",
+ (ircomm[i]->port_name));
}
+
return len;
}
-
#endif /* CONFIG_PROC_FS */
+
+
+
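/*
 * Editorial sketch, not part of the patch: with the sprintf() format
 * strings above, one connected instance would be rendered roughly as
 * shown below.  The <...> fields come from the servicetype/porttype/state
 * string tables defined earlier in this file and the counters are made-up
 * values; only the layout follows from the code in this hunk.
 *
 *	instance 0:
 *	 service: <servicetype> porttype: <porttype> state: <state> user: ircomm_tty
 *	 tx packets: 12 rx packets: 34 tx controls: 5
 *	 pending tuples: 0   ignored tuples: 0
 *	 nullmodem emulation: no IrTTP: READY
 *	 Peer capability: 3wire 9wire
 *	 Port name: <port_name>
 */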
diff --git a/net/irda/ircomm/irvtd.c b/net/irda/ircomm/irvtd.c
deleted file mode 100644
index e01781556..000000000
--- a/net/irda/ircomm/irvtd.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*********************************************************************
- *
- * Filename: irvtd.c
- * Version:
- * Description: A virtual tty driver implementaion,
- * which also may be called as "Port Emulation Entity"
- * in IrCOMM specification.
- * Status: Experimental.
- * Author: Takahide Higuchi <thiguchi@pluto.dti.ne.jp>
- * Source: irlpt.c
- *
- * Copyright (c) 1998, Takahide Higuchi, <thiguchi@pluto.dti.ne.jp>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * I, Takahide Higuchi, provide no warranty for any of this software.
- * This material is provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-/* #include <linux/module.h> */
-
-#include <linux/init.h>
-
-#include <net/irda/irda.h>
-#include <net/irda/irlmp.h>
-
-#include <net/irda/irvtd.h>
-#include <net/irda/irvtd_driver.h>
-
-struct irvtd_cb **irvtd = NULL;
-extern struct ircomm_cb **ircomm;
-
-#if 0
-static char *rcsid = "$Id: irvtd.c,v 1.2 1998/09/27 08:37:04 takahide Exp $";
-#endif
-static char *version = "IrVTD, $Revision: 1.2 $ $Date: 1998/09/27 08:37:04 $ (Takahide Higuchi)";
-
-
-/************************************************************
- * init & cleanup this module
- ************************************************************/
-
-/*
- * Function init_module(void)
- *
- * Initializes the ircomm control structure
- * This Function is called when you do insmod.
- */
-
-__initfunc(int irvtd_init(void))
-{
- int i;
-
- DEBUG( 4, "irvtd:init_module:\n");
- printk( KERN_INFO "%s\n", version);
-
- /* we allocate master array */
-
- irvtd = (struct irvtd_cb **) kmalloc( sizeof(void *) *
- COMM_MAX_TTY,GFP_KERNEL);
- if ( irvtd == NULL) {
- printk( KERN_WARNING "irvtd: Can't allocate array!\n");
- return -ENOMEM;
- }
-
- memset( irvtd, 0, sizeof(void *) * COMM_MAX_TTY);
-
-
- /* we initialize structure */
-
- for (i=0; i < COMM_MAX_TTY; i++){
- irvtd[i] = kmalloc( sizeof(struct irvtd_cb), GFP_KERNEL);
- if(irvtd[i] == NULL){
- printk(KERN_ERR "ircomm_open(): kmalloc failed!\n");
- return -ENOMEM;
- }
-
- memset( irvtd[i], 0, sizeof(struct irvtd_cb));
- irvtd[i]->magic = IRVTD_MAGIC;
- }
-
- /*
- * initialize a "port emulation entity"
- */
-
- if(irvtd_register_ttydriver()){
- printk( KERN_WARNING "IrCOMM: Error in ircomm_register_device\n");
- return -ENODEV;
- }
-
-
- DEBUG( 4, "irvtd:init_module:done\n");
- return 0;
-}
-
-void irvtd_cleanup(void)
-{
- int i;
- DEBUG( 4, "--> ircomm:cleanup_module\n");
-
- /*
- * free some resources
- */
- if (irvtd) {
- for (i=0; i<COMM_MAX_TTY; i++) {
- if (irvtd[i]) {
- DEBUG( 4, "freeing structures\n");
- /* irvtd_close(); :{| */
- kfree(irvtd[i]);
- irvtd[i] = NULL;
- }
- }
- DEBUG( 4, "freeing master array\n");
- kfree(irvtd);
- irvtd = NULL;
- }
-
-
-
- DEBUG( 0, "unregister_ttydriver..\n");
- irvtd_unregister_ttydriver();
-
- DEBUG( 4, "ircomm:cleanup_module -->\n");
-}
-
-#ifdef MODULE
-
-int init_module(void)
-{
- irvtd_init();
- return 0;
-}
-
-
-/*
- * Function ircomm_cleanup (void)
- * This is called when you rmmod.
- */
-
-void cleanup_module(void)
-{
- irvtd_cleanup();
-}
-
-#endif /* MODULE */
-
-
-
diff --git a/net/irda/ircomm/irvtd_driver.c b/net/irda/ircomm/irvtd_driver.c
index 7738e3e78..2df2fdd60 100644
--- a/net/irda/ircomm/irvtd_driver.c
+++ b/net/irda/ircomm/irvtd_driver.c
@@ -2,7 +2,7 @@
*
* Filename: irvtd_driver.c
* Version:
- * Description: An implementation of "port emulation entity" of IrCOMM
+ * Description: Virtual tty driver (the "port emulation entity" of IrCOMM)
* Status: Experimental.
* Author: Takahide Higuchi <thiguchi@pluto.dti.ne.jp>
* Source: serial.c by Linus Torvalds
@@ -22,17 +22,20 @@
********************************************************************/
#include <linux/module.h>
+#include <linux/init.h>
+
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/termios.h>
+#include <linux/tty.h>
#include <asm/segment.h>
#include <asm/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>
+#include <net/irda/irias_object.h>
#include <net/irda/irvtd.h>
-#include <net/irda/irvtd_driver.h>
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
@@ -41,14 +44,15 @@
#define DO_RESTART
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-static char *irvtd_ttyname = "irnine";
-struct tty_driver irvtd_drv, irvtd_callout_driver;
+struct tty_driver irvtd_drv;
struct tty_struct *irvtd_table[COMM_MAX_TTY];
struct termios *irvtd_termios[COMM_MAX_TTY];
struct termios *irvtd_termios_locked[COMM_MAX_TTY];
-static int ircomm_vsd_refcount;
-extern struct ircomm_cb **ircomm;
-extern struct irvtd_cb **irvtd;
+static int irvtd_refcount;
+struct irvtd_cb **irvtd = NULL;
+
+static char *revision_date = "Sun Apr 18 17:31:53 1999";
+
/*
* prototypes
@@ -70,158 +74,32 @@ void irvtd_stop(struct tty_struct *tty);
void irvtd_start(struct tty_struct *tty);
void irvtd_hangup(struct tty_struct *tty);
void irvtd_flush_buffer(struct tty_struct *tty);
+void irvtd_flush_chars(struct tty_struct *tty);
-static void flush_txbuff(struct irvtd_cb *info);
static void change_speed(struct irvtd_cb *driver);
-static void irvtd_write_to_tty( void *instance );
-
+static void irvtd_write_to_tty( struct irvtd_cb *);
+static void irvtd_send_data_request( struct irvtd_cb *);
static void irvtd_break(struct tty_struct *tty, int break_state);
static void irvtd_send_xchar(struct tty_struct *tty, char ch);
+static void irvtd_wait_until_sent(struct tty_struct *tty, int timeout);
-#if 0
-static char *rcsid = "$Id: irvtd_driver.c,v 1.13 1998/12/06 10:09:07 takahide Exp $";
-#endif
-
-
-
-
-/*
- * Function ircomm_register_device(void)
- * we register "port emulation entity"(see IrCOMM specification) here
- * as a tty device.
- * it will be called when you insmod.
- * ( This function derives from linux/drivers/char/serial.c )
- */
-
-int irvtd_register_ttydriver(void){
-
- DEBUG( 4, "-->irvtd_register_ttydriver\n");
-
- /* setup virtual serial port device */
-
- /* Initialize the tty_driver structure ,which is defined in
- tty_driver.h */
-
- memset(&irvtd_drv, 0, sizeof(struct tty_driver));
- irvtd_drv.magic = IRVTD_MAGIC;
- irvtd_drv.name = irvtd_ttyname;
- irvtd_drv.major = IRCOMM_MAJOR;
- irvtd_drv.minor_start = IRVTD_MINOR;
- irvtd_drv.num = COMM_MAX_TTY;
- irvtd_drv.type = TTY_DRIVER_TYPE_SERIAL; /* see tty_driver.h */
- irvtd_drv.subtype = IRVTD_TYPE_NORMAL; /* private type */
-
- /*
- * see drivers/char/tty_io.c and termios(3)
- */
-
- irvtd_drv.init_termios = tty_std_termios;
- irvtd_drv.init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- irvtd_drv.flags = TTY_DRIVER_REAL_RAW; /* see tty_driver.h */
- irvtd_drv.refcount = &ircomm_vsd_refcount;
-
- /* pointer to the tty data structures */
-
- irvtd_drv.table = irvtd_table;
- irvtd_drv.termios = irvtd_termios;
- irvtd_drv.termios_locked = irvtd_termios_locked;
-
- /*
- * Interface table from the kernel(tty driver) to the ircomm
- * layer
- */
-
- irvtd_drv.open = irvtd_open;
- irvtd_drv.close = irvtd_close;
- irvtd_drv.write = irvtd_write;
- irvtd_drv.put_char = irvtd_put_char;
- irvtd_drv.flush_chars = irvtd_flush_chars;
- irvtd_drv.write_room = irvtd_write_room;
- irvtd_drv.chars_in_buffer = irvtd_chars_in_buffer;
- irvtd_drv.flush_buffer = irvtd_flush_buffer;
- irvtd_drv.ioctl = irvtd_ioctl;
- irvtd_drv.throttle = irvtd_throttle;
- irvtd_drv.unthrottle = irvtd_unthrottle;
- irvtd_drv.set_termios = irvtd_set_termios;
- irvtd_drv.stop = NULL; /* irvtd_stop; */
- irvtd_drv.start = NULL; /* irvtd_start; */
- irvtd_drv.hangup = irvtd_hangup;
-
- irvtd_drv.send_xchar = irvtd_send_xchar;
- irvtd_drv.break_ctl = irvtd_break;
- irvtd_drv.read_proc = NULL;
- irvtd_drv.wait_until_sent = NULL;
-
- /*
- * The callout device is just like normal device except for
- * minor number and the subtype.
- */
-
- /* What is difference between callout device and normal device? */
- /* My system dosen't have /dev/cua??, so we don't need it? :{| */
- irvtd_callout_driver = irvtd_drv;
- irvtd_callout_driver.name = "irninecua";
- irvtd_callout_driver.minor_start = IRVTD_CALLOUT_MINOR;
- irvtd_callout_driver.subtype = IRVTD_TYPE_CALLOUT;
-
-
- if (tty_register_driver(&irvtd_drv)){
- DEBUG(0,"IrCOMM:Couldn't register tty driver\n");
- return(1);
- }
- if (tty_register_driver(&irvtd_callout_driver))
- DEBUG(0,"IrCOMM:Couldn't register callout tty driver\n");
-
- DEBUG( 4, "irvtd_register_ttydriver: done.\n");
- return(0);
-}
-
-
-/*
- * Function irvtd_unregister_device(void)
- * it will be called when you rmmod
- */
-
-void irvtd_unregister_ttydriver(void){
-
- int err;
- DEBUG( 4, "--> irvtd_unregister_device\n");
-
- /* unregister tty device */
-
- err = tty_unregister_driver(&irvtd_drv);
- if (err)
- printk("IrCOMM: failed to unregister vtd driver(%d)\n",err);
- err = tty_unregister_driver(&irvtd_callout_driver);
- if (err)
- printk("IrCOMM: failed to unregister vtd_callout driver(%d)\n", err);
-
- DEBUG( 4, "irvtd_unregister_device -->\n");
- return;
-}
+static void irvtd_start_timer( struct irvtd_cb *driver);
+static void irvtd_timer_expired(unsigned long data);
+static int line_info(char *buf, struct irvtd_cb *driver);
+static int irvtd_read_proc(char *buf, char **start, off_t offset, int len,
+ int *eof, void *unused);
/*
- * ----------------------------------------------------------------------
- * Routines for Virtual tty driver
+ **********************************************************************
*
- * most of infomation is descrived in linux/tty_driver.h, but
- * a function ircomm_receive() derives from receive_chars() which is
- * in 2.0.30 kernel (driver/char/serial.c).
- * if you want to understand them, please see related kernel source
- * (and my comments :).
- * ----------------------------------------------------------------------
- */
-
-/*
- * ----------------------------------------------------------------------
- * ircomm_receive_data()
+ * ircomm_receive_data() and friends
*
* like interrupt handler in the serial.c,we receive data when
* ircomm_data_indication comes
- * ----------------------------------------------------------------------
+ *
+ **********************************************************************
*/
@@ -231,25 +109,24 @@ void irvtd_unregister_ttydriver(void){
* send incoming/queued data to tty
*/
-static void irvtd_write_to_tty( void *instance ){
-
+static void irvtd_write_to_tty( struct irvtd_cb *driver)
+{
int status, c, flag;
-
struct sk_buff *skb;
- struct irvtd_cb *driver = (struct irvtd_cb *)instance;
struct tty_struct *tty = driver->tty;
- /* does instance still exist ? should be checked */
- ASSERT(driver->magic == IRVTD_MAGIC, return;);
-
- if(driver->rx_disable ){
- DEBUG(0,__FUNCTION__"rx_disable is true:do_nothing..\n");
+ if(driver->rx_disable)
return;
- }
-
+
skb = skb_dequeue(&driver->rxbuff);
- ASSERT(skb != NULL, return;); /* there's nothing */
- IS_SKB(skb, return;);
+ if(skb == NULL)
+ return; /* there's nothing */
+
+ /*
+ * we should parse controlchannel field here.
+ * (see process_data() in ircomm.c)
+ */
+ ircomm_parse_tuples(driver->comm, skb, CONTROL_CHANNEL);
#ifdef IRVTD_DEBUG_RX
printk("received data:");
@@ -264,12 +141,6 @@ static void irvtd_write_to_tty( void *instance ){
status = driver->comm->peer_line_status & driver->read_status_mask;
/*
- * FIXME: we must do ircomm_parse_ctrl() here, instead of
- * ircomm_common.c!!
- */
-
-
- /*
* if there are too many errors which make a character ignored,
* drop characters
*/
@@ -289,7 +160,7 @@ static void irvtd_write_to_tty( void *instance ){
DEBUG(0,"handling break....\n");
flag = TTY_BREAK;
- if (driver->flags & IRVTD_ASYNC_SAK)
+ if (driver->flags & ASYNC_SAK)
/*
* do_SAK() seems to be an implementation of the
* idea called "Secure Attention Key",
@@ -311,7 +182,8 @@ static void irvtd_write_to_tty( void *instance ){
flag = TTY_NORMAL;
if(c){
- DEBUG(0,"writing %d chars to tty\n",c);
+ DEBUG(4,"writing %d chars to tty\n",c);
+ driver->icount.rx += c;
memset(tty->flip.flag_buf_ptr, flag, c);
memcpy(tty->flip.char_buf_ptr, skb->data, c);
tty->flip.flag_buf_ptr += c;
@@ -325,205 +197,207 @@ static void irvtd_write_to_tty( void *instance ){
else
{
/* queue rest of data again */
- DEBUG(0,__FUNCTION__":retrying frame!\n");
+ DEBUG(4,__FUNCTION__":retrying frame!\n");
+
+ /* build a dummy control channel */
+ skb_push(skb,1);
+ *skb->data = 0; /* clen is 0 */
skb_queue_head( &driver->rxbuff, skb );
}
- /*
- * in order to optimize this routine, these two tasks should be
- * queued in following order
- * ( see run_task_queue() and queue_task() in tqueue.h
- */
- if(skb_queue_len(&driver->rxbuff))
- /* let me try again! */
- queue_task(&driver->rx_tqueue, &tq_timer);
if(c)
- /* read your buffer! */
- queue_task(&tty->flip.tqueue, &tq_timer);
+ /* let the process read its buffer! */
+ tty_flip_buffer_push(tty);
-
- if(skb_queue_len(&driver->rxbuff)< IRVTD_RX_QUEUE_LOW
- && driver->ttp_stoprx){
+ if(skb_queue_len(&driver->rxbuff)< IRVTD_RX_QUEUE_LOW &&
+ driver->ttp_stoprx){
irttp_flow_request(driver->comm->tsap, FLOW_START);
driver->ttp_stoprx = 0;
}
+
+ if(skb_queue_empty(&driver->rxbuff) && driver->disconnect_pend){
+ /* disconnect */
+ driver->disconnect_pend = 0;
+ driver->rx_disable = 1;
+ tty_hangup(driver->tty);
+ }
}
-void irvtd_receive_data(void *instance, void *sap, struct sk_buff *skb){
-
+static int irvtd_receive_data(void *instance, void *sap, struct sk_buff *skb)
+{
struct irvtd_cb *driver = (struct irvtd_cb *)instance;
- ASSERT(driver != NULL, return;);
- ASSERT(driver->magic == IRVTD_MAGIC, return;);
+ ASSERT(driver != NULL, return -1;);
+ ASSERT(driver->magic == IRVTD_MAGIC, return -1;);
+ DEBUG(4, __FUNCTION__"(): queue frame\n");
/* queue incoming data and make bottom half handler ready */
skb_queue_tail( &driver->rxbuff, skb );
- if(skb_queue_len(&driver->rxbuff) == 1)
- irvtd_write_to_tty(driver);
+
if(skb_queue_len(&driver->rxbuff) > IRVTD_RX_QUEUE_HIGH){
irttp_flow_request(driver->comm->tsap, FLOW_STOP);
driver->ttp_stoprx = 1;
}
- return;
+ irvtd_write_to_tty(driver);
+ return 0;
}
-#if 0
-void irvtd_receive_data(void *instance, void *sap, struct sk_buff *skb){
+/*
+ ***********************************************************************
+ *
+ * irvtd_send_data() and friends
+ *
+ * like interrupt handler in the serial.c,we send data when
+ * a timer is expired
+ *
+ ***********************************************************************
+ */
+
+
+static void irvtd_start_timer( struct irvtd_cb *driver)
+{
+ ASSERT( driver != NULL, return;);
+ ASSERT( driver->magic == IRVTD_MAGIC, return;);
+
+ del_timer( &driver->timer);
- int flag,status;
- __u8 c;
- struct tty_struct *tty;
- struct irvtd_cb *driver = (struct irvtd_cb *)instance;
+ driver->timer.data = (unsigned long) driver;
+ driver->timer.function = &irvtd_timer_expired;
+ driver->timer.expires = jiffies + (HZ / 5); /* 200msec */
+
+ add_timer( &driver->timer);
+}
- ASSERT(driver != NULL, return;);
- ASSERT(driver->magic == IRVTD_MAGIC, return;);
- if(driver->rx_disable ){
- DEBUG(0,__FUNCTION__"rx_disable is true:do nothing\n");
- return;
- }
+static void irvtd_timer_expired(unsigned long data)
+{
+ struct irvtd_cb *driver = (struct irvtd_cb *)data;
- tty = driver->tty;
- status = driver->comm->peer_line_status & driver->read_status_mask;
+ ASSERT(driver != NULL,return;);
+ ASSERT(driver->magic == IRVTD_MAGIC,return;);
+ DEBUG(4, __FUNCTION__"()\n");
- c = MIN(skb->len, (TTY_FLIPBUF_SIZE - tty->flip.count));
- DEBUG(0, __FUNCTION__"skb_len=%d, tty->flip.count=%d \n"
- ,(int)skb->len, tty->flip.count);
+ irvtd_send_data_request(driver);
-#ifdef IRVTD_DEBUG_RX
- printk("received data:");
+ irvtd_write_to_tty(driver);
+
+ /* start our timer again and again */
+ irvtd_start_timer(driver);
+}
+
+
+static void irvtd_send_data_request(struct irvtd_cb *driver)
+{
+ int err;
+ struct sk_buff *skb = driver->txbuff;
+
+ ASSERT(skb != NULL,return;);
+ DEBUG(4, __FUNCTION__"()\n");
+
+ if(driver->tty->hw_stopped || driver->tx_disable)
+ return;
+ if(!skb->len)
+ return; /* no data to send */
+
+#ifdef IRVTD_DEBUG_TX
+	DEBUG(4, __FUNCTION__"():count(%d)\n",(int)skb->len);
{
int i;
for ( i=0;i<skb->len;i++)
- printk("%02x ", skb->data[i]);
+ printk("%02x", skb->data[i]);
printk("\n");
}
#endif
- /*
- * if there are too many errors which make a character ignored,
- * drop characters
- */
-
- if(status & driver->ignore_status_mask){
- DEBUG(0,__FUNCTION__"I/O error:ignore characters.\n");
- dev_kfree_skb(skb, FREE_READ);
- return;
- }
-
- if (driver->comm->peer_break_signal ) {
- driver->comm->peer_break_signal = 0;
- DEBUG(0,"handling break....\n");
-
- flag = TTY_BREAK;
- if (driver->flags & IRVTD_ASYNC_SAK)
- /*
- * do_SAK() seems to be an implementation of the
- * idea called "Secure Attention Key",
- * which seems to be discribed in "Orange book".
- * (which is published by U.S.military!!?? )
- * see source of do_SAK() but what is "Orange book"!?
- */
- do_SAK(tty);
- }else if (status & LSR_PE)
- flag = TTY_PARITY;
- else if (status & LSR_FE)
- flag = TTY_FRAME;
- else if (status & LSR_OE)
- flag = TTY_OVERRUN;
- else
- flag = TTY_NORMAL;
+ DEBUG(1, __FUNCTION__"():sending %d octets\n",(int)skb->len );
+ driver->icount.tx += skb->len;
+ err = ircomm_data_request(driver->comm, driver->txbuff);
+ if (err){
+ ASSERT(err == 0,;);
+ DEBUG(0,"%d chars are lost\n",(int)skb->len);
+ skb_trim(skb, 0);
+ }
- if(c){
- DEBUG(0,"writing %d chars to tty\n",c);
- memset(tty->flip.flag_buf_ptr, flag, c);
- memcpy(tty->flip.char_buf_ptr, skb->data, c);
- tty->flip.flag_buf_ptr += c;
- tty->flip.char_buf_ptr += c;
- tty->flip.count += c;
- skb_pull(skb,c);
- queue_task_irq_off(&tty->flip.tqueue, &tq_timer);
+ /* allocate a new frame */
+ skb = driver->txbuff = dev_alloc_skb(driver->comm->max_txbuff_size);
+ if (skb == NULL){
+ printk(__FUNCTION__"():alloc_skb failed!\n");
+ } else {
+ skb_reserve(skb, COMM_HEADER_SIZE);
}
- if(skb->len >0)
- DEBUG(0,__FUNCTION__":dropping frame!\n");
- dev_kfree_skb(skb, FREE_READ);
- DEBUG(4,__FUNCTION__":done\n");
+
+ wake_up_interruptible(&driver->tty->write_wait);
}
-#endif
+
/*
- * ----------------------------------------------------------------------
+ ***********************************************************************
+ *
* indication/confirmation handlers:
- * they will be registerd in irvtd_startup() to know that we
- * discovered (or we are discovered by) remote device.
- * ----------------------------------------------------------------------
+ *
+ * these routines are handlers for IrCOMM protocol stack
+ *
+ ***********************************************************************
*/
-/* this function is called whed ircomm_attach_cable succeed */
-
-void irvtd_attached(struct ircomm_cb *comm){
-
- ASSERT(comm != NULL, return;);
- ASSERT(comm->magic == IRCOMM_MAGIC, return;);
-
- DEBUG(0,"irvtd_attached:sending connect_request"
- " for servicetype(%d)..\n",comm->servicetype);
- ircomm_connect_request(comm, SAR_DISABLE );
-}
-
/*
- * irvtd_connect_confirm()
- * ircomm_connect_request which we have send have succeed!
+ * Function irvtd_connect_confirm (instance, sap, qos, max_sdu_size, skb)
+ *
+ *    the ircomm_connect_request we sent has succeeded!
+ *
*/
-
void irvtd_connect_confirm(void *instance, void *sap, struct qos_info *qos,
- int max_sdu_size, struct sk_buff *skb){
-
+ __u32 max_sdu_size, struct sk_buff *skb)
+{
struct irvtd_cb *driver = (struct irvtd_cb *)instance;
ASSERT(driver != NULL, return;);
ASSERT(driver->magic == IRVTD_MAGIC, return;);
/*
+ * set default value
+ */
+
+ driver->msr |= (MSR_DCD|MSR_RI|MSR_DSR|MSR_CTS);
+
+ /*
* sending initial control parameters here
- *
- * TODO: it must be done in ircomm_connect_request()
*/
-#if 1
if(driver->comm->servicetype == THREE_WIRE_RAW)
return; /* do nothing */
- ircomm_append_ctrl(driver->comm, SERVICETYPE);
- /* ircomm_append_ctrl(self, DATA_RATE); */
- ircomm_append_ctrl(driver->comm, DATA_FORMAT);
- ircomm_append_ctrl(driver->comm, FLOW_CONTROL);
- ircomm_append_ctrl(driver->comm, XON_XOFF_CHAR);
- /* ircomm_append_ctrl(driver->comm, ENQ_ACK_CHAR); */
+ driver->comm->dte |= (MCR_DTR | MCR_RTS | DELTA_DTR | DELTA_RTS);
+
+ ircomm_control_request(driver->comm, SERVICETYPE);
+ ircomm_control_request(driver->comm, DATA_RATE);
+ ircomm_control_request(driver->comm, DATA_FORMAT);
+ ircomm_control_request(driver->comm, FLOW_CONTROL);
+ ircomm_control_request(driver->comm, XON_XOFF_CHAR);
+ /* ircomm_control_request(driver->comm, ENQ_ACK_CHAR); */
switch(driver->comm->servicetype){
case CENTRONICS:
break;
case NINE_WIRE:
- ircomm_append_ctrl(driver->comm, DTELINE_STATE);
+ ircomm_control_request(driver->comm, DTELINE_STATE);
break;
default:
}
- ircomm_control_request(driver->comm);
-#endif
-
+ driver->tx_disable = 0;
wake_up_interruptible(&driver->open_wait);
}
/*
- * irvtd_connect_indication()
- * we are discovered and being requested to connect by remote device !
+ * Function irvtd_connect_indication (instance, sap, qos, max_sdu_size, skb)
+ *
+ *    the remote device has discovered us and is requesting a connection!
+ *
*/
-
void irvtd_connect_indication(void *instance, void *sap, struct qos_info *qos,
- int max_sdu_size, struct sk_buff *skb)
+ __u32 max_sdu_size, struct sk_buff *skb)
{
struct irvtd_cb *driver = (struct irvtd_cb *)instance;
@@ -535,46 +409,103 @@ void irvtd_connect_indication(void *instance, void *sap, struct qos_info *qos,
DEBUG(4,"irvtd_connect_indication:sending connect_response...\n");
- /*TODO: connect_response should send initialcontrolparameters! TH*/
-
ircomm_connect_response(comm, NULL, SAR_DISABLE );
- wake_up_interruptible(&driver->open_wait);
-}
+ driver->tx_disable = 0;
+ /*
+ * send initial control parameters
+ */
+ if(driver->comm->servicetype == THREE_WIRE_RAW)
+ return; /* do nothing */
+ driver->comm->dte |= (MCR_DTR | MCR_RTS | DELTA_DTR | DELTA_RTS);
+
+ switch(driver->comm->servicetype){
+ case NINE_WIRE:
+ ircomm_control_request(driver->comm, DTELINE_STATE);
+ break;
+ default:
+ }
-void irvtd_disconnect_indication(void *instance, void *sap , LM_REASON reason,
- struct sk_buff *skb){
+ driver->msr |= (MSR_DCD|MSR_RI|MSR_DSR|MSR_CTS);
+ wake_up_interruptible(&driver->open_wait);
+}
+
+/*
+ * Function irvtd_disconnect_indication (instance, sap, reason, skb)
+ *
+ * This is a handler for ircomm_disconnect_indication. since this
+ * function is called in the context of interrupt handler,
+ * interruptible_sleep_on() MUST not be used.
+ */
+void irvtd_disconnect_indication(void *instance, void *sap , LM_REASON reason,
+ struct sk_buff *skb)
+{
struct irvtd_cb *driver = (struct irvtd_cb *)instance;
+
ASSERT(driver != NULL, return;);
ASSERT(driver->tty != NULL, return;);
ASSERT(driver->magic == IRVTD_MAGIC, return;);
DEBUG(4,"irvtd_disconnect_indication:\n");
- tty_hangup(driver->tty);
+
+ driver->tx_disable = 1;
+ if(skb_queue_empty(&driver->rxbuff)){
+ /* disconnect */
+ driver->rx_disable = 1;
+ tty_hangup(driver->tty);
+ } else {
+ driver->disconnect_pend = 1;
+ }
}
/*
- * irvtd_control_indication
+ * Function irvtd_control_indication (instance, sap, cmd)
+ *
+ *    handles flow indications (TX_READY/TX_BUSY) and control channel
+ *    parameters reported by the IrCOMM layer
*
*/
-
-
-void irvtd_control_indication(void *instance, void *sap, LOCAL_FLOW flow){
-
+void irvtd_control_indication(void *instance, void *sap, IRCOMM_CMD cmd)
+{
struct irvtd_cb *driver = (struct irvtd_cb *)instance;
- __u8 pi; /* instruction of control channel */
ASSERT(driver != NULL, return;);
ASSERT(driver->magic == IRVTD_MAGIC, return;);
- DEBUG(0,"irvtd_control_indication:\n");
+ DEBUG(4,__FUNCTION__"()\n");
+
+ if(cmd == TX_READY){
+ driver->ttp_stoptx = 0;
+ driver->tty->hw_stopped = driver->cts_stoptx;
+ irvtd_start_timer( driver);
+
+ if(driver->cts_stoptx)
+ return;
+
+ /*
+ * driver->tty->write_wait will keep asleep if
+ * our txbuff is full.
+ * now that we can send packets to IrTTP layer,
+ * we kick it here.
+ */
+ if ((driver->tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ driver->tty->ldisc.write_wakeup)
+ (driver->tty->ldisc.write_wakeup)(driver->tty);
+ return;
+ }
+
+ if(cmd == TX_BUSY){
+ driver->ttp_stoptx = driver->tty->hw_stopped = 1;
+ del_timer( &driver->timer);
+ return;
+ }
+
- pi = driver->comm->pi;
+ ASSERT(cmd == CONTROL_CHANNEL,return;);
- switch(pi){
+ switch(driver->comm->pi){
case DCELINE_STATE:
driver->msr = driver->comm->peer_dce;
@@ -591,19 +522,23 @@ void irvtd_control_indication(void *instance, void *sap, LOCAL_FLOW flow){
wake_up_interruptible(&driver->delta_msr_wait);
}
- if ((driver->flags & IRVTD_ASYNC_CHECK_CD) && (driver->msr & DELTA_DCD)) {
+ if ((driver->flags & ASYNC_CHECK_CD) && (driver->msr & DELTA_DCD)) {
DEBUG(0,"CD now %s...\n",
(driver->msr & MSR_DCD) ? "on" : "off");
- if (driver->msr & DELTA_DCD)
+ if (driver->msr & MSR_DCD)
+ {
+ /* DCD raised! */
wake_up_interruptible(&driver->open_wait);
- else if (!((driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE) &&
- (driver->flags & IRVTD_ASYNC_CALLOUT_NOHUP))) {
-
- DEBUG(0,"irvtd_control_indication:hangup..\n");
+ }
+ else
+ {
+ /* DCD falled */
+ DEBUG(0,__FUNCTION__"():hangup..\n");
tty_hangup(driver->tty);
}
+
}
if (driver->comm->flow_ctrl & USE_CTS) {
@@ -632,50 +567,38 @@ void irvtd_control_indication(void *instance, void *sap, LOCAL_FLOW flow){
driver->cts_stoptx = 1;
driver->tty->hw_stopped = 1;
-/* driver->IER &= ~UART_IER_THRI; */
-/* serial_out(info, UART_IER, info->IER); */
}
}
}
-
-
break;
- case TX_READY:
- driver->ttp_stoptx = 0;
- driver->tty->hw_stopped = driver->cts_stoptx;
-
- /*
- * driver->tty->write_wait will keep asleep if
- * our txbuff is not empty.
- * so if we can really send a packet now,
- * send it and then wake it up.
- */
-
- if(driver->cts_stoptx)
- break;
-
- flush_txbuff(driver);
- if ((driver->tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- driver->tty->ldisc.write_wakeup)
- (driver->tty->ldisc.write_wakeup)(driver->tty);
- break;
-
- case TX_BUSY:
- driver->ttp_stoptx = driver->tty->hw_stopped = 1;
+ case FLOW_CONTROL:
+ case DATA_RATE:
+ case XON_XOFF_CHAR:
+ case DTELINE_STATE:
+ /* (maybe) nothing to do */
break;
default:
- DEBUG(0,"irvtd:unknown control..\n");
-
+ DEBUG(0,__FUNCTION__"():PI = 0x%02x is not implemented\n",
+ (int)driver->comm->pi);
}
}
+/*
+ ***********************************************************************
+ *
+ * driver kernel interfaces
+ * these functions work as an interface between the kernel and this driver
+ *
+ ***********************************************************************
+ */
+
+
/*
* ----------------------------------------------------------------------
* irvtd_open() and friends
*
- *
* ----------------------------------------------------------------------
*/
@@ -685,114 +608,45 @@ static int irvtd_block_til_ready(struct tty_struct *tty, struct file * filp,
{
struct wait_queue wait = { current, NULL };
- int retval;
+ int retval = 0;
int do_clocal = 0;
/*
- * If the device is in the middle of being closed, then block
- * (sleep) until it's done, and (when being woke up)then try again.
- */
-
- if (tty_hung_up_p(filp) ||
- (driver->flags & IRVTD_ASYNC_CLOSING)) {
- if (driver->flags & IRVTD_ASYNC_CLOSING)
- interruptible_sleep_on(&driver->close_wait);
-#ifdef DO_RESTART
- if (driver->flags & IRVTD_ASYNC_HUP_NOTIFY)
- return -EAGAIN;
- else
- return -ERESTARTSYS;
-#else
- return -EAGAIN;
-#endif
- }
-
- /*
- * If this is a callout device, then just make sure the normal
- * device isn't being used.
- */
-
- if (tty->driver.subtype == IRVTD_TYPE_CALLOUT) {
- if (driver->flags & IRVTD_ASYNC_NORMAL_ACTIVE)
- return -EBUSY;
- if ((driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE) &&
- (driver->flags & IRVTD_ASYNC_SESSION_LOCKOUT) &&
- (driver->session != current->session))
- return -EBUSY;
- if ((driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE) &&
- (driver->flags & IRVTD_ASYNC_PGRP_LOCKOUT) &&
- (driver->pgrp != current->pgrp))
- return -EBUSY;
-
- driver->flags |= IRVTD_ASYNC_CALLOUT_ACTIVE;
- return 0;
- }
-
- /*
* If non-blocking mode is set, or the port is not enabled,
* then make the check up front and then exit.
*/
if ((filp->f_flags & O_NONBLOCK) ||
(tty->flags & (1 << TTY_IO_ERROR))) {
- if (driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE)
- return -EBUSY;
- driver->flags |= IRVTD_ASYNC_NORMAL_ACTIVE;
return 0;
}
- if (driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE) {
- if (driver->normal_termios.c_cflag & CLOCAL)
- do_clocal = 1;
- } else {
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
- }
-
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
+
+
/*
* We wait until ircomm_connect_request() succeed or
* ircomm_connect_indication comes
- *
- * This is what is written in serial.c:
- * "Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, driver->count is dropped by one, so that
- * rs_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal."
*/
- retval = 0;
+
add_wait_queue(&driver->open_wait, &wait);
- DEBUG(0,"block_til_ready before block: line%d, count = %d\n",
- driver->line, driver->count);
+ DEBUG(0,__FUNCTION__"():before block( line = %d, count = %d )\n",
+ driver->line, driver->count);
- cli();
- if (!tty_hung_up_p(filp))
- driver->count--;
- sti();
driver->blocked_open++;
-
+ /* wait for a connection established */
while (1) {
current->state = TASK_INTERRUPTIBLE;
-
- if (!(driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE) &&
- (driver->comm->state == COMM_CONN)){
- /*
- * signal DTR and RTS
- */
- driver->comm->dte = driver->mcr |= (MCR_DTR | MCR_RTS |DELTA_DTR|DELTA_RTS);
-
- ircomm_append_ctrl(driver->comm, DTELINE_STATE);
- ircomm_control_request(driver->comm);
- }
-
+
if (tty_hung_up_p(filp) ||
- !(driver->flags & IRVTD_ASYNC_INITIALIZED)) {
+ !(driver->flags & ASYNC_INITIALIZED)) {
#ifdef DO_RESTART
- if (driver->flags & IRVTD_ASYNC_HUP_NOTIFY)
+ if (driver->flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
else
retval = -ERESTARTSYS;
@@ -802,15 +656,9 @@ static int irvtd_block_til_ready(struct tty_struct *tty, struct file * filp,
break;
}
- /*
- * if clocal == 0 or received DCD or state become CONN,then break
- */
-
- if (!(driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE) &&
- !(driver->flags & IRVTD_ASYNC_CLOSING) &&
+ if (!(driver->flags & ASYNC_CLOSING) &&
(driver->comm->state == COMM_CONN) &&
- ( do_clocal || (driver->msr & MSR_DCD) )
- )
+ ( do_clocal || (driver->msr & MSR_DCD)))
break;
if(signal_pending(current)){
@@ -818,63 +666,51 @@ static int irvtd_block_til_ready(struct tty_struct *tty, struct file * filp,
break;
}
-#ifdef IRVTD_DEBUG_OPEN
- printk(KERN_INFO"block_til_ready blocking:"
- " ttys%d, count = %d\n", driver->line, driver->count);
-#endif
+
+ DEBUG(4,__FUNCTION__"():blocking( %s%d, count = %d )\n",
+ tty->driver.name, driver->line, driver->count);
+
schedule();
}
current->state = TASK_RUNNING;
remove_wait_queue(&driver->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- driver->count++;
driver->blocked_open--;
-#ifdef IRVTD_DEBUG_OPEN
- printk("block_til_ready after blocking: ttys%d, count = %d\n",
- driver->line, driver->count);
-#endif
+
+ DEBUG(0, __FUNCTION__"():after blocking\n");
+
if (retval)
return retval;
- driver->flags |= IRVTD_ASYNC_NORMAL_ACTIVE;
return 0;
}
-static void change_speed(struct irvtd_cb *driver){
-
+static void change_speed(struct irvtd_cb *driver)
+{
unsigned cflag,cval;
if (!driver->tty || !driver->tty->termios || !driver->comm)
return;
cflag = driver->tty->termios->c_cflag;
-
-
- /*
- * change baud rate here. but not implemented now
- */
-
-
-
-
/*
* byte size and parity
*/
- switch (cflag & CSIZE) {
- case CS5: cval = 0x00; break;
- case CS6: cval = 0x01; break;
- case CS7: cval = 0x02; break;
- case CS8: cval = 0x03; break;
- default: cval = 0x00; break; /* too keep GCC shut... */
+ switch (cflag & CSIZE)
+ {
+ case CS5: cval = IRCOMM_WLEN5; break;
+ case CS6: cval = IRCOMM_WLEN6; break;
+ case CS7: cval = IRCOMM_WLEN7; break;
+ case CS8: cval = IRCOMM_WLEN8; break;
+ default: cval = IRCOMM_WLEN5; break; /* too keep GCC shut... */
}
if (cflag & CSTOPB) { /* use 2 stop bit mode */
- cval |= 0x04;
+ cval |= IRCOMM_STOP2;
}
if (cflag & PARENB)
- cval |= 0x08;
+ cval |= IRCOMM_PARENB; /* enable parity check */
if (!(cflag & PARODD))
- cval |= 0x10;
+ cval |= IRCOMM_PAREVEN; /* even parity */
/* CTS flow control flag and modem status interrupts */
@@ -884,74 +720,68 @@ static void change_speed(struct irvtd_cb *driver){
driver->comm->flow_ctrl |= ~USE_CTS;
if (cflag & CLOCAL)
- driver->flags &= ~IRVTD_ASYNC_CHECK_CD;
+ driver->flags &= ~ASYNC_CHECK_CD;
else
- driver->flags |= IRVTD_ASYNC_CHECK_CD;
+ driver->flags |= ASYNC_CHECK_CD;
/*
* Set up parity check flag
*/
- driver->read_status_mask = LSR_OE ;
+ driver->read_status_mask = LSR_OE;
if (I_INPCK(driver->tty))
driver->read_status_mask |= LSR_FE | LSR_PE;
if (I_BRKINT(driver->tty) || I_PARMRK(driver->tty))
driver->read_status_mask |= LSR_BI;
+ /*
+ * Characters to ignore
+ */
driver->ignore_status_mask = 0;
+ if (I_IGNPAR(driver->tty))
+ driver->ignore_status_mask |= LSR_PE | LSR_FE;
if (I_IGNBRK(driver->tty)) {
driver->ignore_status_mask |= LSR_BI;
- driver->read_status_mask |= LSR_BI;
/*
* If we're ignore parity and break indicators, ignore
* overruns too. (For real raw support).
*/
- if (I_IGNPAR(driver->tty)) {
- driver->ignore_status_mask |= LSR_OE | \
- LSR_PE | LSR_FE;
- driver->read_status_mask |= LSR_OE | \
- LSR_PE | LSR_FE;
- }
+ if (I_IGNPAR(driver->tty))
+ driver->ignore_status_mask |= LSR_OE;
}
- driver->comm->data_format = cval;
- ircomm_append_ctrl(driver->comm, DATA_FORMAT);
- ircomm_append_ctrl(driver->comm, FLOW_CONTROL);
- ircomm_control_request(driver->comm);
- /* output to IrCOMM here*/
+ driver->comm->data_format = cval;
+ ircomm_control_request(driver->comm, DATA_FORMAT);
+ ircomm_control_request(driver->comm, FLOW_CONTROL);
}
-static int irvtd_startup(struct irvtd_cb *driver){
-
+static int irvtd_startup(struct irvtd_cb *driver)
+{
+ struct ias_object* obj;
struct notify_t irvtd_notify;
+ /* FIXME: it should not be hard coded */
+ __u8 oct_seq[6] = { 0,1,4,1,1,1 };
- DEBUG(4,"irvtd_startup:\n" );
+ DEBUG(4,__FUNCTION__"()\n" );
+ if(driver->flags & ASYNC_INITIALIZED)
+ return 0;
/*
* initialize our tx/rx buffer
*/
- if(driver->flags & IRVTD_ASYNC_INITIALIZED)
- return(0);
-
skb_queue_head_init(&driver->rxbuff);
- driver->rx_tqueue.data = driver;
- driver->rx_tqueue.routine = irvtd_write_to_tty;
-
- if(!driver->txbuff){
- driver->txbuff = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
- if (!driver->txbuff){
- DEBUG(0,"irvtd_open():alloc_skb failed!\n");
- return -ENOMEM;
- }
-
- skb_reserve(driver->txbuff, COMM_HEADER_SIZE);
+ driver->txbuff = dev_alloc_skb(COMM_DEFAULT_DATA_SIZE);
+ if (!driver->txbuff){
+ DEBUG(0,__FUNCTION__"():alloc_skb failed!\n");
+ return -ENOMEM;
}
+ skb_reserve(driver->txbuff, COMM_HEADER_SIZE);
irda_notify_init(&irvtd_notify);
irvtd_notify.data_indication = irvtd_receive_data;
@@ -959,18 +789,33 @@ static int irvtd_startup(struct irvtd_cb *driver){
irvtd_notify.connect_indication = irvtd_connect_indication;
irvtd_notify.disconnect_indication = irvtd_disconnect_indication;
irvtd_notify.flow_indication = irvtd_control_indication;
+ strncpy( irvtd_notify.name, "ircomm_tty", NOTIFY_MAX_NAME);
irvtd_notify.instance = driver;
- strncpy( irvtd_notify.name, "irvtd", NOTIFY_MAX_NAME);
+
+ driver->comm = ircomm_open_instance(irvtd_notify);
+ if(!driver->comm){
+ return -ENODEV;
+ }
+
+
+ /*
+ * Register with LM-IAS as a server
+ */
+
+ obj = irias_new_object( "IrDA:IrCOMM", IAS_IRCOMM_ID);
+ irias_add_integer_attrib( obj, "IrDA:TinyTP:LsapSel",
+ driver->comm->tsap->stsap_sel );
+
+ irias_add_octseq_attrib( obj, "Parameters", &oct_seq[0], 6);
+ irias_insert_object( obj);
+
+ driver->flags |= ASYNC_INITIALIZED;
/*
- * register ourself as a service user of IrCOMM
- * TODO: other servicetype(i.e. 3wire,3wireraw)
+ * discover a peer device
+ * TODO: other servicetype(i.e. 3wire,3wireraw) support
*/
-
- driver->comm = ircomm_attach_cable(NINE_WIRE, irvtd_notify,
- irvtd_attached);
- if(driver->comm == NULL)
- return -ENODEV;
+ ircomm_connect_request(driver->comm, NINE_WIRE);
/*
* TODO:we have to initialize control-channel here!
@@ -981,63 +826,83 @@ static int irvtd_startup(struct irvtd_cb *driver){
clear_bit(TTY_IO_ERROR, &driver->tty->flags);
change_speed(driver);
+ irvtd_start_timer( driver);
- driver->flags |= IRVTD_ASYNC_INITIALIZED;
+ driver->rx_disable = 0;
+ driver->tx_disable = 1;
+ driver->disconnect_pend = 0;
return 0;
}
-int irvtd_open(struct tty_struct * tty, struct file * filp){
-
+int irvtd_open(struct tty_struct * tty, struct file * filp)
+{
struct irvtd_cb *driver;
int retval;
int line;
- DEBUG(4, "irvtd_open():\n");
+ DEBUG(4, __FUNCTION__"():\n");
+ MOD_INC_USE_COUNT;
line = MINOR(tty->device) - tty->driver.minor_start;
- if ((line <0) || (line >= COMM_MAX_TTY))
+ if ((line <0) || (line >= COMM_MAX_TTY)){
+ MOD_DEC_USE_COUNT;
return -ENODEV;
+ }
+
driver = irvtd[line];
- driver->line = line;
+ ASSERT(driver != NULL, MOD_DEC_USE_COUNT;return -ENOMEM;);
+ ASSERT(driver->magic == IRVTD_MAGIC, MOD_DEC_USE_COUNT;return -EINVAL;);
+
driver->count++;
- DEBUG(0, "irvtd_open : %s%d count %d\n", tty->driver.name, line,
+ DEBUG(0, __FUNCTION__"():%s%d count %d\n", tty->driver.name, line,
driver->count);
tty->driver_data = driver;
driver->tty = tty;
-
+ driver->tty->low_latency = (driver->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ /*
+ * If the device is in the middle of being closed, then block
+ * (sleep) until it's done, then exit.
+ */
+
+ if (tty_hung_up_p(filp) ||
+ (driver->flags & ASYNC_CLOSING)) {
+ if (driver->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&driver->close_wait);
+#ifdef DO_RESTART
+ if (driver->flags & ASYNC_HUP_NOTIFY)
+ return -EAGAIN;
+ else
+ return -ERESTARTSYS;
+#else
+ return -EAGAIN;
+#endif
+ }
+
/*
* start up discovering process and ircomm_layer
*/
retval = irvtd_startup(driver);
- if (retval)
+ if (retval){
+ DEBUG(0, __FUNCTION__"():irvtd_startup returns %d\n",retval);
return retval;
- MOD_INC_USE_COUNT;
+ }
retval = irvtd_block_til_ready(tty, filp, driver);
if (retval){
- DEBUG(0,"irvtd_open returning after block_til_ready with %d\n",
- retval);
+ DEBUG(0,__FUNCTION__
+ "():returning after block_til_ready (errno = %d)\n", retval);
return retval;
}
- if ((driver->count == 1) && driver->flags & IRVTD_ASYNC_SPLIT_TERMIOS){
- if(tty->driver.subtype == IRVTD_TYPE_NORMAL)
- *tty->termios = driver->normal_termios;
- else
- *tty->termios = driver->callout_termios;
-
- change_speed(driver);
- }
-
driver->session = current->session;
driver->pgrp = current->pgrp;
- driver->rx_disable = 0;
- return (0);
+ return 0;
}
@@ -1052,15 +917,55 @@ int irvtd_open(struct tty_struct * tty, struct file * filp){
* ----------------------------------------------------------------------
*/
+/*
+ * Function irvtd_wait_until_sent (tty, timeout)
+ *
+ * wait until Tx queue of IrTTP is empty
+ *
+ */
+static void irvtd_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
+ unsigned long orig_jiffies;
+
+ ASSERT(driver != NULL, return;);
+ ASSERT(driver->magic == IRVTD_MAGIC, return;);
+ ASSERT(driver->comm != NULL, return;);
+
+ DEBUG(1, __FUNCTION__"():\n");
+ if(!tty->closing)
+ return; /* nothing to do */
+
+ /*
+ * at disconnection, we should wait until Tx queue of IrTTP is
+ * flushed
+ */
+
+ ircomm_disconnect_request(driver->comm, NULL, P_NORMAL);
+ orig_jiffies = jiffies;
+
+ while (driver->comm->tsap->disconnect_pend) {
+ DEBUG(1, __FUNCTION__"():wait..\n");
+ current->state = TASK_INTERRUPTIBLE;
+ current->counter = 0; /* make us low-priority */
+ schedule_timeout(HZ); /* 1sec */
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+ current->state = TASK_RUNNING;
+}
+
static void irvtd_shutdown(struct irvtd_cb * driver)
{
unsigned long flags;
- if (!(driver->flags & IRVTD_ASYNC_INITIALIZED))
+ if (!(driver->flags & ASYNC_INITIALIZED))
return;
- DEBUG(4,"irvtd_shutdown:\n");
+ DEBUG(1,__FUNCTION__"()\n");
/*
* This comment is written in serial.c:
@@ -1075,8 +980,8 @@ static void irvtd_shutdown(struct irvtd_cb * driver)
driver->mcr &= ~(MCR_DTR|MCR_RTS);
driver->comm->dte = driver->mcr;
- ircomm_append_ctrl(driver->comm, DTELINE_STATE );
- ircomm_control_request(driver->comm);
+ ircomm_control_request(driver->comm, DTELINE_STATE );
+
save_flags(flags); cli(); /* Disable interrupts */
@@ -1084,42 +989,49 @@ static void irvtd_shutdown(struct irvtd_cb * driver)
if (driver->tty)
set_bit(TTY_IO_ERROR, &driver->tty->flags);
- ircomm_detach_cable(driver->comm);
+ del_timer( &driver->timer);
+
+ irias_delete_object("IrDA:IrCOMM");
/*
* Free the transmit buffer here
*/
+
+ while(skb_queue_len(&driver->rxbuff)){
+ struct sk_buff *skb;
+ skb = skb_dequeue( &driver->rxbuff);
+ dev_kfree_skb(skb);
+ }
if(driver->txbuff){
- dev_kfree_skb(driver->txbuff); /* is it OK?*/
+ dev_kfree_skb(driver->txbuff);
driver->txbuff = NULL;
}
-
- driver->flags &= ~IRVTD_ASYNC_INITIALIZED;
+ ircomm_close_instance(driver->comm);
+ driver->comm = NULL;
+ driver->flags &= ~ASYNC_INITIALIZED;
restore_flags(flags);
}
-
-
-void irvtd_close(struct tty_struct * tty, struct file * filp){
-
+void irvtd_close(struct tty_struct * tty, struct file * filp)
+{
struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
int line;
unsigned long flags;
- DEBUG(0, "irvtd_close:refc(%d)\n",ircomm_vsd_refcount);
+ DEBUG(1, __FUNCTION__"():refcount= %d\n",irvtd_refcount);
ASSERT(driver != NULL, return;);
ASSERT(driver->magic == IRVTD_MAGIC, return;);
save_flags(flags);cli();
- /*
- * tty_hung_up_p() is defined as
- * " return(filp->f_op == &hung_up_tty_fops); "
- * see driver/char/tty_io.c
- */
if(tty_hung_up_p(filp)){
+ /*
+ * upper tty layer caught a HUP signal and called irvtd_hangup()
+ * before. so we do nothing here.
+		 * before, so we do nothing here.
+ DEBUG(1, __FUNCTION__"():tty_hung_up_p.\n");
MOD_DEC_USE_COUNT;
restore_flags(flags);
return;
@@ -1127,7 +1039,7 @@ void irvtd_close(struct tty_struct * tty, struct file * filp){
line = MINOR(tty->device) - tty->driver.minor_start;
- DEBUG(0, "irvtd_close : %s%d count %d\n", tty->driver.name, line,
+ DEBUG(0, __FUNCTION__"():%s%d count %d\n", tty->driver.name, line,
driver->count);
if ((tty->count == 1) && (driver->count != 1)) {
@@ -1143,313 +1055,218 @@ void irvtd_close(struct tty_struct * tty, struct file * filp){
driver->count = 1;
}
if (--driver->count < 0) {
- printk("irvtd_close: bad count for line%d: %d\n",
+ printk(KERN_ERR"irvtd_close: bad count for line%d: %d\n",
line, driver->count);
driver->count = 0;
}
if (driver->count) { /* do nothing */
+ DEBUG(0, __FUNCTION__"():driver->count is not 0\n");
MOD_DEC_USE_COUNT;
restore_flags(flags);
return;
}
- driver->flags |= IRVTD_ASYNC_CLOSING;
-
- /*
- * Save the termios structure, since this port may have
- * separate termios for callout and dialin.
- */
-
- if (driver->flags & IRVTD_ASYNC_NORMAL_ACTIVE)
- driver->normal_termios = *tty->termios;
- if (driver->flags & IRVTD_ASYNC_CALLOUT_ACTIVE)
- driver->callout_termios = *tty->termios;
+ driver->flags |= ASYNC_CLOSING;
/*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters.
*/
tty->closing = 1;
- if (driver->closing_wait != IRVTD_ASYNC_CLOSING_WAIT_NONE)
+ if (driver->closing_wait != ASYNC_CLOSING_WAIT_NONE){
+ DEBUG(4, __FUNCTION__"():calling tty_wait_until_sent()\n");
tty_wait_until_sent(tty, driver->closing_wait);
+ }
+ /*
+ * we can send disconnect_request with P_HIGH since
+ * tty_wait_until_sent() and irvtd_wait_until_sent() should
+ * have disconnected the link
+ */
+ ircomm_disconnect_request(driver->comm, NULL, P_HIGH);
/*
* Now we stop accepting input.
*/
driver->rx_disable = TRUE;
-
- /*
- * Now we flush our buffer.., and shutdown ircomm service layer
- */
-
- /* drop our tx/rx buffer */
- if (tty->driver.flush_buffer)
- tty->driver.flush_buffer(tty);
-
- while(skb_queue_len(&driver->rxbuff)){
- struct sk_buff *skb;
- skb = skb_dequeue( &driver->rxbuff);
- dev_kfree_skb(skb);
- }
-
- /* drop users buffer? */
+ /* drop ldisc's buffer */
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
-
+ if (tty->driver.flush_buffer)
+ tty->driver.flush_buffer(driver->tty);
tty->closing = 0;
driver->tty = NULL;
- /*
- * ad-hoc coding:
- * we wait 2 sec before ircomm_detach_cable so that
- * irttp will send all contents of its queue
- */
-
-#if 0
- if (driver->blocked_open) {
+ if (driver->blocked_open)
+ {
if (driver->close_delay) {
-#endif
-
/* kill time */
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(driver->close_delay + 2*HZ);
-#if 0
+ schedule_timeout(driver->close_delay);
}
wake_up_interruptible(&driver->open_wait);
}
-#endif
-
- driver->flags &= ~(IRVTD_ASYNC_NORMAL_ACTIVE|
- IRVTD_ASYNC_CALLOUT_ACTIVE|
- IRVTD_ASYNC_CLOSING);
+ irvtd_shutdown(driver);
+ driver->flags &= ~ASYNC_CLOSING;
wake_up_interruptible(&driver->close_wait);
- irvtd_shutdown(driver);
MOD_DEC_USE_COUNT;
restore_flags(flags);
- DEBUG(4,"irvtd_close:done:refc(%d)\n",ircomm_vsd_refcount);
+ DEBUG(4, __FUNCTION__"():done\n");
}
-
-
/*
* ----------------------------------------------------------------------
* irvtd_write() and friends
 * This routine will be called when data is passed from the
* kernel or user.
- *
- * NOTE:I have stolen copy_from_user() from 2.0.30 kernel(linux/isdnif.h)
- * to access user space of memory carefully. Thanks a lot!:)
* ----------------------------------------------------------------------
*/
int irvtd_write(struct tty_struct * tty, int from_user,
- const unsigned char *buf, int count){
-
- struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
+ const unsigned char *buf, int count)
+{
+ struct irvtd_cb *driver;
int c = 0;
int wrote = 0;
- struct sk_buff *skb = NULL;
+ unsigned long flags;
+ struct sk_buff *skb;
__u8 *frame;
- DEBUG(4, "irvtd_write():\n");
+ ASSERT(tty != NULL, return -EFAULT;);
+ driver = (struct irvtd_cb *)tty->driver_data;
+ ASSERT(driver != NULL, return -EFAULT;);
+ ASSERT(driver->magic == IRVTD_MAGIC, return -EFAULT;);
- if (!tty || !driver->txbuff)
- return 0;
+ DEBUG(4, __FUNCTION__"()\n");
-
-
+ save_flags(flags);
while(1){
+ cli();
skb = driver->txbuff;
-
- c = MIN(count, (skb_tailroom(skb) - COMM_HEADER_SIZE));
- if (c <= 0)
+ ASSERT(skb != NULL, break;);
+ c = MIN(count, (skb_tailroom(skb)));
+ if (c <= 0)
break;
/* write to the frame */
-
frame = skb_put(skb,c);
if(from_user){
copy_from_user(frame,buf,c);
} else
memcpy(frame, buf, c);
- /* flush the frame */
- irvtd_flush_chars(tty);
+ restore_flags(flags);
wrote += c;
count -= c;
+ buf += c;
+ irvtd_send_data_request(driver);
}
+ restore_flags(flags);
return (wrote);
}
+void irvtd_flush_chars(struct tty_struct *tty)
+{
+ struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
+ ASSERT( driver != NULL, return;);
+ ASSERT( driver->magic == IRVTD_MAGIC, return;);
+
+ DEBUG(4, __FUNCTION__"()\n");
+ irvtd_send_data_request(driver);
+}
+
+
/*
- * ----------------------------------------------------------------------
- * irvtd_put_char()
- * This routine is called by the kernel to pass a single character.
- * If we exausted our buffer,we can ignore the character!
- * ----------------------------------------------------------------------
+ * Function irvtd_put_char (tty, ch)
+ *
+ * This routine is called by the kernel to pass a single character.
+ *    If we have exhausted our buffer, we can ignore the character!
+ *
*/
-void irvtd_put_char(struct tty_struct *tty, unsigned char ch){
-
+void irvtd_put_char(struct tty_struct *tty, unsigned char ch)
+{
__u8 *frame ;
struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
- struct sk_buff *skb = driver->txbuff;
+ struct sk_buff *skb;
+ unsigned long flags;
- ASSERT(tty->driver_data != NULL, return;);
+ ASSERT(driver != NULL, return;);
+ DEBUG(4, __FUNCTION__"()\n");
- DEBUG(4, "irvtd_put_char:\n");
- if(!driver->txbuff)
- return;
+ save_flags(flags);cli();
+ skb = driver->txbuff;
+ ASSERT(skb != NULL,return;);
+ ASSERT(skb_tailroom(skb) > 0, return;);
DEBUG(4, "irvtd_put_char(0x%02x) skb_len(%d) MAX(%d):\n",
(int)ch ,(int)skb->len,
- driver->comm->maxsdusize - COMM_HEADER_SIZE);
+ driver->comm->max_txbuff_size - COMM_HEADER_SIZE);
/* append a character */
-
frame = skb_put(skb,1);
frame[0] = ch;
- return;
-}
-
-/*
- * ----------------------------------------------------------------------
- * irvtd_flush_chars() and friend
- * This routine will be called after a series of characters was written using
- * irvtd_put_char().We have to send them down to IrCOMM.
- * ----------------------------------------------------------------------
- */
-
-static void flush_txbuff(struct irvtd_cb *driver){
-
- struct sk_buff *skb = driver->txbuff;
- struct tty_struct *tty = driver->tty;
- ASSERT(tty != NULL, return;);
-
-#ifdef IRVTD_DEBUG_TX
- printk("flush_txbuff:");
- {
- int i;
- for ( i=0;i<skb->len;i++)
- printk("%02x", skb->data[i]);
- printk("\n");
- }
-#else
- DEBUG(4, "flush_txbuff:count(%d)\n",(int)skb->len);
-#endif
-
- /* add "clen" field */
- skb_push(skb,1);
- skb->data[0]=0; /* without control channel */
-
- ircomm_data_request(driver->comm, driver->txbuff);
-
- /* allocate new frame */
- skb = driver->txbuff = dev_alloc_skb(driver->comm->max_txbuff_size);
- if (skb == NULL){
- printk(KERN_ERR"flush_txbuff():alloc_skb failed!\n");
- } else {
- skb_reserve(skb, COMM_HEADER_SIZE);
- }
- wake_up_interruptible(&driver->tty->write_wait);
-}
-
-void irvtd_flush_chars(struct tty_struct *tty){
-
- struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
- if(!driver || driver->magic != IRVTD_MAGIC || !driver->txbuff){
- DEBUG(0,"irvtd_flush_chars:null structure:ignore\n");
- return;
- }
- DEBUG(4, "irvtd_flush_chars():\n");
-
- while(tty->hw_stopped){
- DEBUG(4,"irvtd_flush_chars:hw_stopped:sleep..\n");
- tty_wait_until_sent(tty,0);
- DEBUG(4,"irvtd_flush_chars:waken up!\n");
- if(!driver->txbuff->len)
- return;
- }
- flush_txbuff(driver);
+ restore_flags(flags);
+ return;
}
-
-
-
/*
- * ----------------------------------------------------------------------
- * irvtd_write_room()
- * This routine returns the room that our buffer has now.
+ * Function irvtd_write_room (tty)
+ *
+ * This routine returns the room that our buffer has now.
*
- * NOTE:
- * driver/char/n_tty.c drops a character(s) when this routine returns 0,
- * and then linux will be frozen after a few minutes :( why? bug?
- * ( I found this on linux-2.0.33 )
- * So this routine flushes a buffer if there is few room, TH
- * ----------------------------------------------------------------------
*/
-
int irvtd_write_room(struct tty_struct *tty){
int ret;
- struct sk_buff *skb = (struct sk_buff *)((struct irvtd_cb *) tty->driver_data)->txbuff;
-
- if(!skb){
- DEBUG(0,"irvtd_write_room:NULL skb\n");
- return(0);
- }
+ struct sk_buff *skb = ((struct irvtd_cb *) tty->driver_data)->txbuff;
- ret = skb_tailroom(skb) - COMM_HEADER_SIZE;
-
- if(ret < 0){
- DEBUG(0,"irvtd_write_room:error:room is %d!",ret);
- ret = 0;
- }
- DEBUG(4, "irvtd_write_room:\n");
- DEBUG(4, "retval(%d)\n",ret);
+ ASSERT(skb !=NULL, return 0;);
+ ret = skb_tailroom(skb);
- /* flush buffer automatically to avoid kernel freeze :< */
- if(ret < 8) /* why 8? there's no reason :) */
- irvtd_flush_chars(tty);
+ DEBUG(4, __FUNCTION__"(): room is %d bytes\n",ret);
return(ret);
}
/*
- * ----------------------------------------------------------------------
- * irvtd_chars_in_buffer()
- * This function returns how many characters which have not been sent yet
- * are still in buffer.
- * ----------------------------------------------------------------------
+ * Function irvtd_chars_in_buffer (tty)
+ *
+ *    This function returns the number of characters that have not yet been
+ *    sent and are still in the buffer.
+ *
*/
-
int irvtd_chars_in_buffer(struct tty_struct *tty){
- struct sk_buff *skb =
- (struct sk_buff *) ((struct irvtd_cb *)tty->driver_data) ->txbuff;
- DEBUG(4, "irvtd_chars_in_buffer()\n");
+ struct sk_buff *skb;
+ unsigned long flags;
- if(!skb){
- printk(KERN_ERR"irvtd_chars_in_buffer:NULL skb\n");
- return(0);
- }
+ DEBUG(4, __FUNCTION__"()\n");
+
+ save_flags(flags);cli();
+ skb = ((struct irvtd_cb *) tty->driver_data)->txbuff;
+ if(skb == NULL) goto err;
+
+ restore_flags(flags);
return (skb->len );
+err:
+ ASSERT(skb != NULL, ;);
+ restore_flags(flags);
+ return 0; /* why not -EFAULT or such? see driver/char/serial.c */
}
/*
- * ----------------------------------------------------------------------
- * irvtd_break()
- * routine which turns the break handling on or off
- * ----------------------------------------------------------------------
+ * Function irvtd_break (tty, break_state)
+ *
+ * Routine which turns the break handling on or off
+ *
*/
-
static void irvtd_break(struct tty_struct *tty, int break_state){
struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
@@ -1463,14 +1280,14 @@ static void irvtd_break(struct tty_struct *tty, int break_state){
if (break_state == -1)
{
driver->comm->break_signal = 0x01;
- ircomm_append_ctrl(driver->comm, BREAK_SIGNAL);
- ircomm_control_request(driver->comm);
+ ircomm_control_request(driver->comm, BREAK_SIGNAL);
+
}
else
{
driver->comm->break_signal = 0x00;
- ircomm_append_ctrl(driver->comm, BREAK_SIGNAL);
- ircomm_control_request(driver->comm);
+ ircomm_control_request(driver->comm, BREAK_SIGNAL);
+
}
restore_flags(flags);
@@ -1497,8 +1314,7 @@ static int get_modem_info(struct irvtd_cb * driver, unsigned int *value)
| ((driver->msr & DELTA_RI) ? TIOCM_RNG : 0)
| ((driver->msr & DELTA_DSR) ? TIOCM_DSR : 0)
| ((driver->msr & DELTA_CTS) ? TIOCM_CTS : 0);
- put_user(result,value);
- return 0;
+ return put_user(result,value);
}
static int set_modem_info(struct irvtd_cb * driver, unsigned int cmd,
@@ -1537,19 +1353,111 @@ static int set_modem_info(struct irvtd_cb * driver, unsigned int cmd,
}
driver->comm->dte = driver->mcr;
- ircomm_append_ctrl(driver->comm, DTELINE_STATE );
- ircomm_control_request(driver->comm);
+ ircomm_control_request(driver->comm, DTELINE_STATE );
+
return 0;
}
-int irvtd_ioctl(struct tty_struct *tty, struct file * file,
- unsigned int cmd, unsigned long arg){
+static int get_serial_info(struct irvtd_cb * driver,
+ struct serial_struct * retinfo)
+{
+ struct serial_struct tmp;
+
+ if (!retinfo)
+ return -EFAULT;
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.line = driver->line;
+ tmp.flags = driver->flags;
+ tmp.baud_base = driver->comm->data_rate;
+ tmp.close_delay = driver->close_delay;
+ tmp.closing_wait = driver->closing_wait;
+
+ /* for compatibility */
+
+ tmp.type = PORT_16550A;
+ tmp.port = 0;
+ tmp.irq = 0;
+ tmp.xmit_fifo_size = 0;
+ tmp.hub6 = 0;
+ tmp.custom_divisor = driver->custom_divisor;
+
+ if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_serial_info(struct irvtd_cb * driver,
+ struct serial_struct * new_info)
+{
+ struct serial_struct new_serial;
+ struct irvtd_cb old_driver;
+
+ if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
+ return -EFAULT;
+
+ old_driver = *driver;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((new_serial.baud_base != driver->comm->data_rate) ||
+ (new_serial.close_delay != driver->close_delay) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) !=
+ (driver->flags & ~ASYNC_USR_MASK)))
+ return -EPERM;
+ driver->flags = ((driver->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ driver->custom_divisor = new_serial.custom_divisor;
+ goto check_and_exit;
+ }
+
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+
+ if(driver->comm->data_rate != new_serial.baud_base){
+ driver->comm->data_rate = new_serial.baud_base;
+ if(driver->comm->state == COMM_CONN)
+ ircomm_control_request(driver->comm,DATA_RATE);
+ }
+ driver->close_delay = new_serial.close_delay * HZ/100;
+ driver->closing_wait = new_serial.closing_wait * HZ/100;
+ driver->custom_divisor = new_serial.custom_divisor;
+
+ driver->flags = ((driver->flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS));
+ driver->tty->low_latency = (driver->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ check_and_exit:
+ if (driver->flags & ASYNC_INITIALIZED) {
+ if (((old_driver.flags & ASYNC_SPD_MASK) !=
+ (driver->flags & ASYNC_SPD_MASK)) ||
+ (old_driver.custom_divisor != driver->custom_divisor)) {
+ if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
+ driver->tty->alt_speed = 57600;
+ if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
+ driver->tty->alt_speed = 115200;
+ if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
+ driver->tty->alt_speed = 230400;
+ if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
+ driver->tty->alt_speed = 460800;
+ change_speed(driver);
+ }
+ }
+ return 0;
+}
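Both helpers follow the setserial(8) convention already used by drivers/char/serial.c, so a user program can tune close_delay/closing_wait or request one of the ASYNC_SPD_* alternate speeds through the usual ioctls. A minimal user-space sketch (the device node name is only an assumption about how the tty ends up being named):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_struct ss;
	int fd = open("/dev/irnine0", O_RDWR | O_NOCTTY);	/* hypothetical node */

	if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ss) < 0)
		return 1;
	ss.closing_wait = 300;				/* units of 1/100 s, i.e. 3 s */
	ss.flags = (ss.flags & ~ASYNC_SPD_MASK) | ASYNC_SPD_VHI; /* alt_speed 115200 */
	return ioctl(fd, TIOCSSERIAL, &ss) < 0;
}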
+
+
+
+int irvtd_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
int error;
+ unsigned long flags;
struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
- struct icounter_struct cnow;
- struct icounter_struct *p_cuser; /* user space */
+ struct serial_icounter_struct cnow,cprev;
+ struct serial_icounter_struct *p_cuser; /* user space */
DEBUG(4,"irvtd_ioctl:requested ioctl(0x%08x)\n",cmd);
@@ -1557,7 +1465,6 @@ int irvtd_ioctl(struct tty_struct *tty, struct file * file,
#ifdef IRVTD_DEBUG_IOCTL
{
/* kill time so that debug messages will come slowly */
- unsigned long flags;
save_flags(flags);cli();
current->state = TASK_INTERRUPTIBLE;
current->timeout = jiffies + HZ/4; /*0.25sec*/
@@ -1572,7 +1479,6 @@ int irvtd_ioctl(struct tty_struct *tty, struct file * file,
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
if (tty->flags & (1 << TTY_IO_ERROR)){
- DEBUG(0,"irvtd_ioctl:I/O error...\n");
return -EIO;
}
}
@@ -1580,143 +1486,89 @@ int irvtd_ioctl(struct tty_struct *tty, struct file * file,
switch (cmd) {
case TIOCMGET:
- error = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(unsigned int));
- if (error)
- return error;
return get_modem_info(driver, (unsigned int *) arg);
case TIOCMBIS:
case TIOCMBIC:
case TIOCMSET:
return set_modem_info(driver, cmd, (unsigned int *) arg);
-#if 0
- /*
- * we wouldn't implement them since we don't use serial_struct
- */
case TIOCGSERIAL:
- error = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct));
- if (error)
- return error;
- return irvtd_get_serial_info(driver,
- (struct serial_struct *) arg);
+ return get_serial_info(driver, (struct serial_struct *) arg);
case TIOCSSERIAL:
- error = verify_area(VERIFY_READ, (void *) arg,
- sizeof(struct serial_struct));
- if (error)
- return error;
- return irvtd_set_serial_info(driver,
- (struct serial_struct *) arg);
-
-
- case TIOCSERGETLSR: /* Get line status register */
- error = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(unsigned int));
- if (error)
- return error;
- else
- return get_lsr_info(driver, (unsigned int *) arg);
-#endif
+ return set_serial_info(driver, (struct serial_struct *) arg);
-/*
- * I think we don't need them
- */
-/* case TIOCSERCONFIG: */
-
-
-/*
- * They cannot be implemented because we don't use async_struct
- * which is defined in serial.h
- */
-
-/* case TIOCSERGSTRUCT: */
-/* error = verify_area(VERIFY_WRITE, (void *) arg, */
-/* sizeof(struct async_struct)); */
-/* if (error) */
-/* return error; */
-/* memcpy_tofs((struct async_struct *) arg, */
-/* driver, sizeof(struct async_struct)); */
-/* return 0; */
-
-/* case TIOCSERGETMULTI: */
-/* error = verify_area(VERIFY_WRITE, (void *) arg, */
-/* sizeof(struct serial_multiport_struct)); */
-/* if (error) */
-/* return error; */
-/* return get_multiport_struct(driver, */
-/* (struct serial_multiport_struct *) arg); */
-/* case TIOCSERSETMULTI: */
-/* error = verify_area(VERIFY_READ, (void *) arg, */
-/* sizeof(struct serial_multiport_struct)); */
-/* if (error) */
-/* return error; */
-/* return set_multiport_struct(driver, */
-/* (struct serial_multiport_struct *) arg); */
- /*
- * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS)
- * to change
- * - mask passed in arg for lines of interest
- * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
- * Caller should use TIOCGICOUNT to see which one it was
- */
case TIOCMIWAIT:
+ save_flags(flags); cli();
+ /* note the counters on entry */
+ cprev = driver->icount;
+ restore_flags(flags);
while (1) {
interruptible_sleep_on(&driver->delta_msr_wait);
- /* see if a signal did it */
-/* if (current->signal & ~current->blocked) */
-/* return -ERESTARTSYS; */
-
- if ( ((arg & TIOCM_RNG) && (driver->msr & DELTA_RI)) ||
- ((arg & TIOCM_DSR) && (driver->msr & DELTA_DSR)) ||
- ((arg & TIOCM_CD) && (driver->msr & DELTA_DCD)) ||
- ((arg & TIOCM_CTS) && (driver->msr & DELTA_CTS))) {
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ save_flags(flags); cli();
+ cnow = driver->icount; /* atomic copy */
+ restore_flags(flags);
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+ return -EIO; /* no change => error */
+ if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
return 0;
}
+ cprev = cnow;
}
/* NOTREACHED */
-
case TIOCGICOUNT:
- /*
- * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
- * Return: write counters to the user passed counter struct
- * NB: both 1->0 and 0->1 transitions are counted except for
- * RI where only 0->1 is counted.
- */
- error = verify_area(VERIFY_WRITE, (void *) arg,
- sizeof(struct icounter_struct));
- if (error)
- return error;
- cli();
+ save_flags(flags); cli();
cnow = driver->icount;
- sti();
- p_cuser = (struct icounter_struct *) arg;
- put_user(cnow.cts, &p_cuser->cts);
- put_user(cnow.dsr, &p_cuser->dsr);
- put_user(cnow.rng, &p_cuser->rng);
- put_user(cnow.dcd, &p_cuser->dcd);
+ restore_flags(flags);
+ p_cuser = (struct serial_icounter_struct *) arg;
+ error = put_user(cnow.cts, &p_cuser->cts);
+ if (error) return error;
+ error = put_user(cnow.dsr, &p_cuser->dsr);
+ if (error) return error;
+ error = put_user(cnow.rng, &p_cuser->rng);
+ if (error) return error;
+ error = put_user(cnow.dcd, &p_cuser->dcd);
+ if (error) return error;
+ error = put_user(cnow.rx, &p_cuser->rx);
+ if (error) return error;
+ error = put_user(cnow.tx, &p_cuser->tx);
+ if (error) return error;
+ error = put_user(cnow.frame, &p_cuser->frame);
+ if (error) return error;
+ error = put_user(cnow.overrun, &p_cuser->overrun);
+ if (error) return error;
+ error = put_user(cnow.parity, &p_cuser->parity);
+ if (error) return error;
+ error = put_user(cnow.brk, &p_cuser->brk);
+ if (error) return error;
+ error = put_user(cnow.buf_overrun, &p_cuser->buf_overrun);
+ if (error) return error;
return 0;
+
+
+	/* ioctls which are incompatible with serial.c */
-
- case TIOCGSERIAL:
- case TIOCSSERIAL:
+ case TIOCSERGSTRUCT:
+ DEBUG(0,__FUNCTION__"():sorry, TIOCSERGSTRUCT is not supported\n");
+ return -ENOIOCTLCMD;
case TIOCSERGETLSR:
+ DEBUG(0,__FUNCTION__"():sorry, TIOCSERGETLSR is not supported\n");
+ return -ENOIOCTLCMD;
case TIOCSERCONFIG:
- case TIOCSERGWILD:
- case TIOCSERSWILD:
- case TIOCSERGSTRUCT:
- case TIOCSERGETMULTI:
- case TIOCSERSETMULTI:
- DEBUG(0,"irvtd_ioctl:sorry, ioctl(0x%08x)is not implemented\n",cmd);
- return -ENOIOCTLCMD; /* ioctls which are imcompatible with serial.c */
-
- case TCSETS:
- case TCGETS:
- case TCFLSH:
+ DEBUG(0,__FUNCTION__"():sorry, TIOCSERCONFIG is not supported\n");
+ return -ENOIOCTLCMD;
+
+
default:
- return -ENOIOCTLCMD; /* ioctls which we must not touch */
+ return -ENOIOCTLCMD; /* ioctls which we must ignore */
}
return 0;
}
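The reworked TIOCMIWAIT handler now sleeps until one of the masked modem lines actually changes state (the counter comparison filters out spurious wakeups), and TIOCGICOUNT exports those counters; user code normally pairs the two, roughly as in this sketch (fd is assumed to be an already opened IrCOMM tty):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

static void wait_for_carrier_change(int fd)
{
	struct serial_icounter_struct ic;

	/* block until DCD or CTS toggles; a pending signal aborts the wait */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) < 0)
		return;
	/* read back the counters to see which line moved and how often */
	if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("dcd=%d cts=%d rx=%d tx=%d\n", ic.dcd, ic.cts, ic.rx, ic.tx);
}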
@@ -1766,14 +1618,14 @@ void irvtd_set_termios(struct tty_struct *tty, struct termios * old_termios){
static void irvtd_send_xchar(struct tty_struct *tty, char ch){
- DEBUG(0, __FUNCTION__"():\n");
+ DEBUG(1, __FUNCTION__"():\n");
irvtd_put_char(tty, ch);
}
void irvtd_throttle(struct tty_struct *tty){
struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
- DEBUG(0, "irvtd_throttle:\n");
+ DEBUG(1, "irvtd_throttle:\n");
if (I_IXOFF(tty))
irvtd_put_char(tty, STOP_CHAR(tty));
@@ -1781,22 +1633,22 @@ void irvtd_throttle(struct tty_struct *tty){
driver->mcr &= ~MCR_RTS;
driver->mcr |= DELTA_RTS;
driver->comm->dte = driver->mcr;
- ircomm_append_ctrl(driver->comm, DTELINE_STATE );
- ircomm_control_request(driver->comm);
+ ircomm_control_request(driver->comm, DTELINE_STATE );
+
irttp_flow_request(driver->comm->tsap, FLOW_STOP);
}
void irvtd_unthrottle(struct tty_struct *tty){
struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
- DEBUG(0, "irvtd_unthrottle:\n");
+ DEBUG(1, "irvtd_unthrottle:\n");
if (I_IXOFF(tty))
irvtd_put_char(tty, START_CHAR(tty));
driver->mcr |= (MCR_RTS|DELTA_RTS);
driver->comm->dte = driver->mcr;
- ircomm_append_ctrl(driver->comm, DTELINE_STATE );
- ircomm_control_request(driver->comm);
+ ircomm_control_request(driver->comm, DTELINE_STATE );
+
irttp_flow_request(driver->comm->tsap, FLOW_START);
}
@@ -1829,19 +1681,17 @@ irvtd_start(struct tty_struct *tty){
* ------------------------------------------------------------
* irvtd_hangup()
* This routine notifies that tty layer have got HUP signal
- * Is this routine right ? :{|
* ------------------------------------------------------------
*/
void irvtd_hangup(struct tty_struct *tty){
struct irvtd_cb *info = (struct irvtd_cb *)tty->driver_data;
- DEBUG(0, "irvtd_hangup()\n");
+ DEBUG(0, __FUNCTION__"()\n");
irvtd_flush_buffer(tty);
irvtd_shutdown(info);
info->count = 0;
- info->flags &= ~(IRVTD_ASYNC_NORMAL_ACTIVE|IRVTD_ASYNC_CALLOUT_ACTIVE);
info->tty = NULL;
wake_up_interruptible(&info->open_wait);
}
@@ -1851,15 +1701,18 @@ void irvtd_flush_buffer(struct tty_struct *tty){
struct irvtd_cb *driver = (struct irvtd_cb *)tty->driver_data;
struct sk_buff *skb;
- skb = (struct sk_buff *)driver->txbuff;
+ skb = driver->txbuff;
+ ASSERT(skb != NULL, return;);
- DEBUG(4, "irvtd_flush_buffer:%d chars are gone..\n",(int)skb->len);
- skb_trim(skb,0);
+ if(skb->len){
+ DEBUG(0, __FUNCTION__"():%d chars in txbuff are lost..\n",(int)skb->len);
+ skb_trim(skb,0);
+ }
/* write_wait is a wait queue of tty_wait_until_sent().
* see tty_io.c of kernel
*/
- wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->write_wait);
if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
tty->ldisc.write_wakeup)
@@ -1867,3 +1720,275 @@ void irvtd_flush_buffer(struct tty_struct *tty){
}
+
+/*
+ * Function ircomm_register_device(void), init_module() and friends
+ *
+ * we register "port emulation entity"(see IrCOMM specification) here
+ * as a tty device.
+ */
+
+int irvtd_register_ttydriver(void){
+
+ DEBUG( 4, "-->irvtd_register_ttydriver\n");
+
+ /* setup virtual serial port device */
+
+	/* Initialize the tty_driver structure, which is defined in
+ tty_driver.h */
+
+ memset(&irvtd_drv, 0, sizeof(struct tty_driver));
+ irvtd_drv.magic = IRVTD_MAGIC;
+ irvtd_drv.driver_name = "IrCOMM_tty";
+ irvtd_drv.name = "irnine";
+ irvtd_drv.major = IRCOMM_MAJOR;
+ irvtd_drv.minor_start = IRVTD_MINOR;
+ irvtd_drv.num = COMM_MAX_TTY;
+ irvtd_drv.type = TTY_DRIVER_TYPE_SERIAL; /* see tty_driver.h */
+
+
+ /*
+ * see drivers/char/tty_io.c and termios(3)
+ */
+
+ irvtd_drv.init_termios = tty_std_termios;
+ irvtd_drv.init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ irvtd_drv.flags = TTY_DRIVER_REAL_RAW; /* see tty_driver.h */
+ irvtd_drv.refcount = &irvtd_refcount;
+
+ /* pointer to the tty data structures */
+
+ irvtd_drv.table = irvtd_table;
+ irvtd_drv.termios = irvtd_termios;
+ irvtd_drv.termios_locked = irvtd_termios_locked;
+
+ /*
+ * Interface table from the kernel(tty driver) to the ircomm
+ * layer
+ */
+
+ irvtd_drv.open = irvtd_open;
+ irvtd_drv.close = irvtd_close;
+ irvtd_drv.write = irvtd_write;
+ irvtd_drv.put_char = irvtd_put_char;
+ irvtd_drv.flush_chars = irvtd_flush_chars;
+ irvtd_drv.write_room = irvtd_write_room;
+ irvtd_drv.chars_in_buffer = irvtd_chars_in_buffer;
+ irvtd_drv.flush_buffer = irvtd_flush_buffer;
+ irvtd_drv.ioctl = irvtd_ioctl;
+ irvtd_drv.throttle = irvtd_throttle;
+ irvtd_drv.unthrottle = irvtd_unthrottle;
+ irvtd_drv.set_termios = irvtd_set_termios;
+ irvtd_drv.stop = NULL; /* irvtd_stop; */
+ irvtd_drv.start = NULL; /* irvtd_start; */
+ irvtd_drv.hangup = irvtd_hangup;
+
+ irvtd_drv.send_xchar = irvtd_send_xchar;
+ irvtd_drv.break_ctl = irvtd_break;
+ irvtd_drv.read_proc = irvtd_read_proc;
+ irvtd_drv.wait_until_sent = irvtd_wait_until_sent;
+
+
+
+ if (tty_register_driver(&irvtd_drv)){
+ DEBUG(0,"IrCOMM:Couldn't register tty driver\n");
+ return(1);
+ }
+
+ DEBUG( 4, "irvtd_register_ttydriver: done.\n");
+ return(0);
+}
+
+
+/*
+ * Function irvtd_unregister_device(void)
+ * it will be called when you rmmod
+ */
+
+void irvtd_unregister_ttydriver(void){
+
+ int err;
+ DEBUG( 4, "--> irvtd_unregister_device\n");
+
+ /* unregister tty device */
+
+ err = tty_unregister_driver(&irvtd_drv);
+ if (err)
+ printk("IrCOMM: failed to unregister vtd driver(%d)\n",err);
+ DEBUG( 4, "irvtd_unregister_device -->\n");
+ return;
+}
+
+/*
+ **********************************************************************
+ * proc stuff
+ *
+ **********************************************************************
+ */
+
+static int line_info(char *buf, struct irvtd_cb *driver)
+{
+ int ret=0;
+
+ ASSERT(driver != NULL,goto exit;);
+ ASSERT(driver->magic == IRVTD_MAGIC,goto exit;);
+
+ ret += sprintf(buf, "tx: %d rx: %d"
+ ,driver->icount.tx, driver->icount.rx);
+
+ if (driver->icount.frame)
+ ret += sprintf(buf+ret, " fe:%d", driver->icount.frame);
+ if (driver->icount.parity)
+ ret += sprintf(buf+ret, " pe:%d", driver->icount.parity);
+ if (driver->icount.brk)
+ ret += sprintf(buf+ret, " brk:%d", driver->icount.brk);
+ if (driver->icount.overrun)
+ ret += sprintf(buf+ret, " oe:%d", driver->icount.overrun);
+
+ if (driver->mcr & MCR_RTS)
+ ret += sprintf(buf+ret, "|RTS");
+ if (driver->msr & MSR_CTS)
+ ret += sprintf(buf+ret, "|CTS");
+ if (driver->mcr & MCR_DTR)
+ ret += sprintf(buf+ret, "|DTR");
+ if (driver->msr & MSR_DSR)
+ ret += sprintf(buf+ret, "|DSR");
+ if (driver->msr & MSR_DCD)
+ ret += sprintf(buf+ret, "|CD");
+ if (driver->msr & MSR_RI)
+ ret += sprintf(buf+ret, "|RI");
+
+ exit:
+ ret += sprintf(buf+ret, "\n");
+ return ret;
+}
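Concatenated by the caller, each port contributes one status line; with a few bytes transferred, one framing error and RTS/CTS/DTR raised, the output of line_info() would read roughly (values illustrative):

tx: 128 rx: 4096 fe:1|RTS|CTS|DTR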
+
+
+
+static int irvtd_read_proc(char *buf, char **start, off_t offset, int len,
+ int *eof, void *unused)
+{
+ int i, count = 0, l;
+ off_t begin = 0;
+
+ count += sprintf(buf, "driver revision:%s\n", revision_date);
+ for (i = 0; i < COMM_MAX_TTY && count < 4000; i++) {
+ l = line_info(buf + count, irvtd[i]);
+ count += l;
+ if (count+begin > offset+len)
+ goto done;
+ if (count+begin < offset) {
+ begin += count;
+ count = 0;
+ }
+ }
+ *eof = 1;
+done:
+ if (offset >= count+begin)
+ return 0;
+ *start = buf + (begin-offset);
+ return ((len < begin+count-offset) ? len : begin+count-offset);
+}
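The begin/offset bookkeeping above implements the 2.2 read_proc contract: fill buf, then tell the caller through *start and the return value which slice of it corresponds to the requested file offset. For a proc file that fits in a single buffer the same contract reduces to the following sketch (the counter is hypothetical):

static int foo_read_proc(char *buf, char **start, off_t offset, int len,
			 int *eof, void *unused)
{
	int count = sprintf(buf, "frames: %d\n", foo_frames);	/* hypothetical counter */

	if (offset >= count) {			/* everything already consumed */
		*eof = 1;
		return 0;
	}
	*start = buf + offset;			/* slice belonging to this read() */
	if (len > count - offset)
		len = count - offset;
	if (offset + len >= count)
		*eof = 1;
	return len;
}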
+
+
+
+
+/************************************************************
+ * init & cleanup this module
+ ************************************************************/
+
+__initfunc(int irvtd_init(void))
+{
+ int i;
+
+ DEBUG( 4, __FUNCTION__"()\n");
+ printk( KERN_INFO
+ "ircomm_tty: virtual tty driver for IrCOMM ( revision:%s )\n",
+ revision_date);
+
+
+ /* allocate a master array */
+
+ irvtd = (struct irvtd_cb **) kmalloc( sizeof(void *) *
+ COMM_MAX_TTY,GFP_KERNEL);
+ if ( irvtd == NULL) {
+ printk( KERN_WARNING __FUNCTION__"(): kmalloc failed!\n");
+ return -ENOMEM;
+ }
+
+ memset( irvtd, 0, sizeof(void *) * COMM_MAX_TTY);
+
+ for (i=0; i < COMM_MAX_TTY; i++){
+ irvtd[i] = kmalloc( sizeof(struct irvtd_cb), GFP_KERNEL);
+ if(irvtd[i] == NULL){
+ printk(KERN_ERR __FUNCTION__"(): kmalloc failed!\n");
+ return -ENOMEM;
+ }
+ memset( irvtd[i], 0, sizeof(struct irvtd_cb));
+ irvtd[i]->magic = IRVTD_MAGIC;
+ irvtd[i]->line = i;
+ irvtd[i]->closing_wait = 10*HZ ;
+ irvtd[i]->close_delay = 5*HZ/10 ;
+ }
+
+ /*
+ * initialize a "port emulation entity"
+ */
+
+ if(irvtd_register_ttydriver()){
+ printk( KERN_WARNING "IrCOMM: Error in ircomm_register_device\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void irvtd_cleanup(void)
+{
+ int i;
+ DEBUG( 4, __FUNCTION__"()\n");
+
+ /*
+ * free some resources
+ */
+ if (irvtd) {
+ for (i=0; i<COMM_MAX_TTY; i++) {
+ if (irvtd[i]) {
+ if(irvtd[i]->comm)
+ ircomm_close_instance(irvtd[i]->comm);
+ if(irvtd[i]->txbuff)
+ dev_kfree_skb(irvtd[i]->txbuff);
+ DEBUG( 4, "freeing structures\n");
+ kfree(irvtd[i]);
+ irvtd[i] = NULL;
+ }
+ }
+ DEBUG( 4, "freeing master array\n");
+ kfree(irvtd);
+ irvtd = NULL;
+ }
+
+ irvtd_unregister_ttydriver();
+
+}
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ irvtd_init();
+ return 0;
+}
+
+/*
+ * Function ircomm_cleanup (void)
+ * This is called when you rmmod.
+ */
+
+void cleanup_module(void)
+{
+ irvtd_cleanup();
+}
+
+#endif /* MODULE */
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 65a8605a7..cf9e6ea34 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irda_device.c
- * Version: 0.3
+ * Version: 0.5
* Description: Abstract device driver layer and helper functions
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Wed Sep 2 20:22:08 1998
- * Modified at: Mon Jan 18 11:05:59 1999
+ * Modified at: Wed Apr 21 09:48:19 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli, All Rights Reserved.
@@ -30,9 +30,15 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/init.h>
+#include <linux/tty.h>
-#include <net/pkt_sched.h>
+#include <asm/ioctls.h>
+#include <asm/segment.h>
+#include <asm/uaccess.h>
#include <asm/dma.h>
+#include <asm/spinlock.h>
+
+#include <net/pkt_sched.h>
#include <net/irda/irda_device.h>
#include <net/irda/irlap_frame.h>
@@ -45,11 +51,10 @@ extern int w83977af_init(void);
extern int esi_init(void);
extern int tekram_init(void);
extern int actisys_init(void);
+extern int girbil_init(void);
hashbin_t *irda_device = NULL;
-void irda_device_start_todo_timer( struct irda_device *self, int timeout);
-
/* Netdevice functions */
static int irda_device_net_rebuild_header(struct sk_buff *skb);
static int irda_device_net_hard_header(struct sk_buff *skb,
@@ -67,8 +72,6 @@ int irda_device_proc_read( char *buf, char **start, off_t offset, int len,
__initfunc(int irda_device_init( void))
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
/* Allocate master array */
irda_device = hashbin_new( HB_LOCAL);
if ( irda_device == NULL) {
@@ -98,17 +101,19 @@ __initfunc(int irda_device_init( void))
#ifdef CONFIG_ACTISYS_DONGLE
actisys_init();
#endif
-
+#ifdef CONFIG_GIRBIL_DONGLE
+ girbil_init();
+#endif
return 0;
}
void irda_device_cleanup(void)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( irda_device != NULL, return;);
+ ASSERT(irda_device != NULL, return;);
- hashbin_delete( irda_device, (FREE_FUNC) irda_device_close);
+ hashbin_delete(irda_device, (FREE_FUNC) irda_device_close);
}
/*
@@ -117,80 +122,83 @@ void irda_device_cleanup(void)
* Open a new IrDA port device
*
*/
-int irda_device_open( struct irda_device *self, char *name, void *priv)
+int irda_device_open(struct irda_device *self, char *name, void *priv)
{
int result;
int i=0;
-
- /* Check that a minimum of allocation flags are specified */
- ASSERT(( self->rx_buff.flags & (GFP_KERNEL|GFP_ATOMIC)) != 0,
- return -1;);
- ASSERT(( self->tx_buff.flags & (GFP_KERNEL|GFP_ATOMIC)) != 0,
- return -1;);
-
- ASSERT( self->tx_buff.truesize > 0, return -1;);
- ASSERT( self->rx_buff.truesize > 0, return -1;);
-
- self->rx_buff.data = ( __u8 *) kmalloc( self->rx_buff.truesize,
- self->rx_buff.flags);
- self->tx_buff.data = ( __u8 *) kmalloc( self->tx_buff.truesize,
- self->tx_buff.flags);
-
- if ( self->rx_buff.data == NULL || self->tx_buff.data == NULL) {
- DEBUG( 0, "IrDA Self: no space for buffers!\n");
- irda_device_close( self);
- return -ENOMEM;
+
+ /* Allocate memory if needed */
+ if (self->rx_buff.truesize > 0) {
+ self->rx_buff.head = ( __u8 *) kmalloc(self->rx_buff.truesize,
+ self->rx_buff.flags);
+ if (self->rx_buff.head == NULL)
+ return -ENOMEM;
+
+ memset(self->rx_buff.head, 0, self->rx_buff.truesize);
}
+ if (self->tx_buff.truesize > 0) {
+ self->tx_buff.head = ( __u8 *) kmalloc(self->tx_buff.truesize,
+ self->tx_buff.flags);
+ if (self->tx_buff.head == NULL)
+ return -ENOMEM;
- memset( self->rx_buff.data, 0, self->rx_buff.truesize);
- memset( self->tx_buff.data, 0, self->tx_buff.truesize);
+ memset(self->tx_buff.head, 0, self->tx_buff.truesize);
+ }
- self->magic = IRDA_DEVICE_MAGIC;
+ self->magic = IRDA_DEVICE_MAGIC;
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
+ self->tx_buff.data = self->tx_buff.head;
+ self->rx_buff.data = self->rx_buff.head;
/* Initialize timers */
- init_timer( &self->media_busy_timer);
-
- /* Open new IrLAP layer instance */
- self->irlap = irlap_open( self);
+ init_timer(&self->media_busy_timer);
/* A pointer to the low level implementation */
self->priv = priv;
/* Initialize IrDA net device */
do {
- sprintf( self->name, "%s%d", "irda", i++);
- } while ( dev_get( self->name) != NULL);
+ sprintf(self->name, "%s%d", "irda", i++);
+ } while (dev_get(self->name) != NULL);
self->netdev.name = self->name;
self->netdev.priv = (void *) self;
self->netdev.next = NULL;
- if (( result = register_netdev( &self->netdev)) != 0) {
- DEBUG( 0, __FUNCTION__ "(), register_netdev() failed!\n");
+ if ((result = register_netdev(&self->netdev)) != 0) {
+ DEBUG(0, __FUNCTION__ "(), register_netdev() failed!\n");
return -1;
}
-
+
/*
* Make the description for the device. self->netdev.name will get
 * a name like "irda0" and the self->description will get a name
* like "irda0 <-> irtty0"
*/
- strncpy( self->description, self->name, 4);
- strcat( self->description, " <-> ");
- strncat( self->description, name, 23);
-
- hashbin_insert( irda_device, (QUEUE *) self, (int) self, NULL);
-
+ strncpy(self->description, self->name, 5);
+ strcat(self->description, " <-> ");
+ strncat(self->description, name, 23);
+
+ hashbin_insert(irda_device, (QUEUE *) self, (int) self, NULL);
+
/* Open network device */
- dev_open( &self->netdev);
+ dev_open(&self->netdev);
+
+	MESSAGE("IrDA: Registered device %s\n", self->name);
- printk( "IrDA device %s registered.\n", self->name);
+ irda_device_set_media_busy(self, FALSE);
- irda_device_set_media_busy( self, FALSE);
+ /*
+ * Open new IrLAP layer instance, now that everything should be
+ * initialized properly
+ */
+ self->irlap = irlap_open(self);
+	/* It's now safe to initialize the saddr */
+ memcpy(self->netdev.dev_addr, &self->irlap->saddr, 4);
+
return 0;
}
@@ -200,29 +208,29 @@ int irda_device_open( struct irda_device *self, char *name, void *priv)
* Close this instance of the irda_device, just deallocate buffers
*
*/
-void __irda_device_close( struct irda_device *self)
+void __irda_device_close(struct irda_device *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return;);
+ DEBUG(4, __FUNCTION__ "()\n");
- dev_close( &self->netdev);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
- /* Remove netdevice */
- unregister_netdev( &self->netdev);
+ /* We do this test to know if the device has been registered at all */
+ if (self->netdev.type == ARPHRD_IRDA) {
+ dev_close(&self->netdev);
+
+ /* Remove netdevice */
+ unregister_netdev(&self->netdev);
+ }
/* Stop timers */
- del_timer( &self->todo_timer);
- del_timer( &self->media_busy_timer);
+ del_timer(&self->media_busy_timer);
- if ( self->tx_buff.data) {
- kfree( self->tx_buff.data);
- }
+ if (self->tx_buff.head)
+ kfree(self->tx_buff.head);
- if ( self->rx_buff.data) {
- kfree( self->rx_buff.data);
- }
+ if (self->rx_buff.head)
+ kfree(self->rx_buff.head);
self->magic = 0;
}
@@ -233,20 +241,21 @@ void __irda_device_close( struct irda_device *self)
*
*
*/
-void irda_device_close( struct irda_device *self)
+void irda_device_close(struct irda_device *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
- /* Stop IrLAP */
- irlap_close( self->irlap);
+ /* Stop and remove instance of IrLAP */
+ if (self->irlap)
+ irlap_close(self->irlap);
self->irlap = NULL;
- hashbin_remove( irda_device, (int) self, NULL);
+ hashbin_remove(irda_device, (int) self, NULL);
- __irda_device_close( self);
+ __irda_device_close(self);
}
/*
@@ -255,20 +264,20 @@ void irda_device_close( struct irda_device *self)
 * Called when we have detected that another station is transmitting
* in contention mode.
*/
-void irda_device_set_media_busy( struct irda_device *self, int status)
+void irda_device_set_media_busy(struct irda_device *self, int status)
{
- DEBUG( 4, __FUNCTION__ "(%s)\n", status ? "TRUE" : "FALSE");
+ DEBUG(4, __FUNCTION__ "(%s)\n", status ? "TRUE" : "FALSE");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
- if ( status) {
+ if (status) {
self->media_busy = TRUE;
- irda_device_start_mbusy_timer( self);
+ irda_device_start_mbusy_timer(self);
DEBUG( 4, "Media busy!\n");
} else {
self->media_busy = FALSE;
- del_timer( &self->media_busy_timer);
+ del_timer(&self->media_busy_timer);
}
}
@@ -278,35 +287,26 @@ void irda_device_set_media_busy( struct irda_device *self, int status)
 * When this function is called, we will have a process context so it's
* possible for us to sleep, wait or whatever :-)
*/
-static void __irda_device_change_speed( struct irda_device *self, int speed)
+static void __irda_device_change_speed(struct irda_device *self, int speed)
{
- ASSERT( self != NULL, return;);
-
- if ( self->magic != IRDA_DEVICE_MAGIC) {
- DEBUG( 0, __FUNCTION__
- "(), irda device is gone! Maybe you need to update "
- "your irmanager and/or irattach!");
-
- return;
- }
-
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
+
/*
	 * Is it possible to change speed yet? Wait until the last byte
* has been transmitted.
*/
- if ( self->wait_until_sent) {
- self->wait_until_sent( self);
-
- if ( self->change_speed) {
- self->change_speed( self, speed);
-
+ if (self->wait_until_sent) {
+ self->wait_until_sent(self);
+ if (self->change_speed) {
+ self->change_speed(self, speed);
+
/* Update the QoS value only */
self->qos.baud_rate.value = speed;
}
} else {
- DEBUG( 0, __FUNCTION__ "(), Warning, wait_until_sent() "
- "is not implemented by the irda_device!\n");
-
+ printk(KERN_WARNING "wait_until_sent() "
+		       "has not been implemented by the IrDA device driver!\n");
}
}
@@ -316,16 +316,16 @@ static void __irda_device_change_speed( struct irda_device *self, int speed)
* Change the speed of the currently used irda_device
*
*/
-inline void irda_device_change_speed( struct irda_device *self, int speed)
+inline void irda_device_change_speed(struct irda_device *self, int speed)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return;);
- irda_execute_as_process( self,
- (TODO_CALLBACK) __irda_device_change_speed,
- speed);
+ irda_execute_as_process(self,
+ (TODO_CALLBACK) __irda_device_change_speed,
+ speed);
}
inline int irda_device_is_media_busy( struct irda_device *self)
@@ -355,41 +355,6 @@ inline struct qos_info *irda_device_get_qos( struct irda_device *self)
return &self->qos;
}
-void irda_device_todo_expired( unsigned long data)
-{
- struct irda_device *self = ( struct irda_device *) data;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- /* Check that we still exist */
- if ( !self || self->magic != IRDA_DEVICE_MAGIC) {
- return;
- }
- __irda_device_change_speed( self, self->new_speed);
-}
-
-/*
- * Function irda_device_start_todo_timer (self, timeout)
- *
- * Start todo timer. This function is used to delay execution of certain
- * functions. Its implemented using timers since delaying a timer or a
- * bottom halves function can be very difficult othervise.
- *
- */
-void irda_device_start_todo_timer( struct irda_device *self, int timeout)
-{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return;);
-
- del_timer( &self->todo_timer);
-
- self->todo_timer.data = (unsigned long) self;
- self->todo_timer.function = &irda_device_todo_expired;
- self->todo_timer.expires = jiffies + timeout;
-
- add_timer( &self->todo_timer);
-}
-
static struct enet_statistics *irda_device_get_stats( struct device *dev)
{
struct irda_device *priv = (struct irda_device *) dev->priv;
@@ -403,39 +368,38 @@ static struct enet_statistics *irda_device_get_stats( struct device *dev)
* This function should be used by low level device drivers in a similar way
* as ether_setup() is used by normal network device drivers
*/
-int irda_device_setup( struct device *dev)
+int irda_device_setup(struct device *dev)
{
struct irda_device *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( dev != NULL, return -1;);
+ ASSERT(dev != NULL, return -1;);
self = (struct irda_device *) dev->priv;
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return -1;);
dev->get_stats = irda_device_get_stats;
dev->rebuild_header = irda_device_net_rebuild_header;
dev->set_config = irda_device_net_set_config;
dev->change_mtu = irda_device_net_change_mtu;
- dev->hard_header = irda_device_net_hard_header;
+/* dev->hard_header = irda_device_net_hard_header; */
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->type = ARPHRD_IRDA;
- dev->tx_queue_len = 10; /* Short queues in IrDA */
+ dev->tx_queue_len = 8; /* Window size + 1 s-frame */
- memcpy( dev->dev_addr, &self->irlap->saddr, 4);
- memset( dev->broadcast, 0xff, 4);
+ memset(dev->broadcast, 0xff, 4);
dev->mtu = 2048;
dev->tbusy = 1;
- dev_init_buffers( dev);
+ dev_init_buffers(dev);
- dev->flags = 0; /* IFF_NOARP | IFF_POINTOPOINT; */
+ dev->flags = IFF_NOARP;
return 0;
}
@@ -453,16 +417,15 @@ static int irda_device_net_rebuild_header( struct sk_buff *skb)
return 0;
}
-static int irda_device_net_hard_header (struct sk_buff *skb,
- struct device *dev,
- unsigned short type, void *daddr,
- void *saddr, unsigned len)
+static int irda_device_net_hard_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len)
{
DEBUG( 0, __FUNCTION__ "()\n");
skb->mac.raw = skb->data;
/* skb_push(skb,PPP_HARD_HDR_LEN); */
-/* return PPP_HARD_HDR_LEN; */
+ /* return PPP_HARD_HDR_LEN; */
return 0;
}
@@ -485,64 +448,83 @@ static int irda_device_net_change_mtu( struct device *dev, int new_mtu)
* Function irda_device_transmit_finished (void)
*
* Check if there is still some frames in the transmit queue for this
- * device
+ * device. Maybe we should use: q->q.qlen == 0.
*
*/
int irda_device_txqueue_empty( struct irda_device *self)
{
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRDA_DEVICE_MAGIC, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRDA_DEVICE_MAGIC, return -1;);
- /* FIXME: check if this is the right way of doing it? */
- if ( skb_queue_len( &self->netdev.qdisc->q))
+ if (skb_queue_len(&self->netdev.qdisc->q))
return FALSE;
return TRUE;
}
/*
- * Function irda_get_mtt (skb)
- *
- * Utility function for getting the
- *
- */
-__inline__ int irda_get_mtt( struct sk_buff *skb)
-{
- return ((struct irlap_skb_cb *)(skb->cb))->mtt;
-}
-
-/*
* Function setup_dma (idev, buffer, count, mode)
*
* Setup the DMA channel
*
*/
-void setup_dma( int channel, char *buffer, int count, int mode)
+void setup_dma(int channel, char *buffer, int count, int mode)
{
unsigned long flags;
+
+ flags = claim_dma_lock();
+
+ disable_dma(channel);
+ clear_dma_ff(channel);
+ set_dma_mode(channel, mode);
+ set_dma_addr(channel, virt_to_bus(buffer));
+ set_dma_count(channel, count);
+ enable_dma(channel);
+
+ release_dma_lock(flags);
+}
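setup_dma() now brackets the controller programming with claim_dma_lock()/release_dma_lock() instead of a bare cli(). A low-level driver would typically invoke it from its transmit path, roughly as below (a sketch; the channel argument and the assumption that tx_buff.data already points at a prepared frame come from the caller, not from this file):

/* sketch: start a DMA transmit of an already assembled frame */
static void example_dma_xmit(struct irda_device *idev, int channel, int len)
{
	setup_dma(channel, idev->tx_buff.data, len, DMA_MODE_WRITE);
	/* ...now tell the chip to start the transfer; the buffer must not be
	 * touched again until the "DMA done" interrupt arrives */
}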
- save_flags(flags);
- cli();
+#ifdef CONFIG_PROC_FS
- disable_dma( channel);
- clear_dma_ff( channel);
- set_dma_mode( channel, mode);
- set_dma_addr( channel, virt_to_bus(buffer));
- set_dma_count( channel, count);
- enable_dma( channel);
+int irda_device_print_flags(struct irda_device *idev, char *buf)
+{
+ int len=0;
+
+ len += sprintf( buf+len, "\t");
+
+ if (idev->netdev.flags & IFF_UP)
+ len += sprintf( buf+len, "UP ");
+ if (!idev->netdev.tbusy)
+ len += sprintf( buf+len, "RUNNING ");
+
+ if (idev->flags & IFF_SIR)
+ len += sprintf( buf+len, "SIR ");
+ if (idev->flags & IFF_MIR)
+ len += sprintf( buf+len, "MIR ");
+ if (idev->flags & IFF_FIR)
+ len += sprintf( buf+len, "FIR ");
+ if (idev->flags & IFF_PIO)
+ len += sprintf( buf+len, "PIO ");
+ if (idev->flags & IFF_DMA)
+ len += sprintf( buf+len, "DMA ");
+ if (idev->flags & IFF_SHM)
+ len += sprintf( buf+len, "SHM ");
+ if (idev->flags & IFF_DONGLE)
+ len += sprintf( buf+len, "DONGLE ");
+
+ len += sprintf( buf+len, "\n");
- restore_flags(flags);
+ return len;
}
-#ifdef CONFIG_PROC_FS
/*
- * Function irlap_proc_read (buf, start, offset, len, unused)
+ * Function irda_device_proc_read (buf, start, offset, len, unused)
*
* Give some info to the /proc file system
*
*/
-int irda_device_proc_read( char *buf, char **start, off_t offset, int len,
- int unused)
+int irda_device_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused)
{
struct irda_device *self;
unsigned long flags;
@@ -552,32 +534,32 @@ int irda_device_proc_read( char *buf, char **start, off_t offset, int len,
len = 0;
- self = (struct irda_device *) hashbin_get_first( irda_device);
+ self = (struct irda_device *) hashbin_get_first(irda_device);
while ( self != NULL) {
- len += sprintf( buf+len, "device name: %s\n", self->name);
- len += sprintf( buf+len, "description: %s\n",
- self->description);
- len += sprintf( buf+len, " tbusy=%s\n", self->netdev.tbusy ?
- "TRUE" : "FALSE");
- len += sprintf( buf+len, " bps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\n");
+ len += sprintf(buf+len, "%s,", self->name);
+ len += sprintf(buf+len, "\tbinding: %s\n",
+ self->description);
+
+ len += irda_device_print_flags(self, buf+len);
+ len += sprintf(buf+len, "\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\n");
+
+ len += sprintf(buf+len, "\t%d\t",
+ self->qos.baud_rate.value);
+ len += sprintf(buf+len, "%d\t",
+ self->qos.max_turn_time.value);
+ len += sprintf(buf+len, "%d\t",
+ self->qos.data_size.value);
+ len += sprintf(buf+len, "%d\t",
+ self->qos.window_size.value);
+ len += sprintf(buf+len, "%d\t",
+ self->qos.additional_bofs.value);
+ len += sprintf(buf+len, "%d\t",
+ self->qos.min_turn_time.value);
+ len += sprintf(buf+len, "%d",
+ self->qos.link_disc_time.value);
+ len += sprintf(buf+len, "\n");
- len += sprintf( buf+len, " %d\t",
- self->qos.baud_rate.value);
- len += sprintf( buf+len, "%d\t",
- self->qos.max_turn_time.value);
- len += sprintf( buf+len, "%d\t",
- self->qos.data_size.value);
- len += sprintf( buf+len, "%d\t",
- self->qos.window_size.value);
- len += sprintf( buf+len, "%d\t",
- self->qos.additional_bofs.value);
- len += sprintf( buf+len, "%d\t",
- self->qos.min_turn_time.value);
- len += sprintf( buf+len, "%d",
- self->qos.link_disc_time.value);
- len += sprintf( buf+len, "\n");
-
- self = (struct irda_device *) hashbin_get_next( irda_device);
+ self = (struct irda_device *) hashbin_get_next(irda_device);
}
restore_flags(flags);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index f23d3faaf..b87ccbd02 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: iriap.c
- * Version: 0.1
+ * Version: 0.8
* Description: Information Access Protocol (IAP)
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Tue Dec 15 16:00:35 1998
+ * Modified at: Fri Apr 23 09:57:12 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
@@ -28,23 +28,48 @@
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/irda.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>
+#include <net/irda/irmod.h>
#include <net/irda/irlmp.h>
#include <net/irda/irias_object.h>
#include <net/irda/iriap_event.h>
#include <net/irda/iriap.h>
-hashbin_t *iriap = NULL;
+/* FIXME: This one should go in irlmp.c */
+static const char *ias_charset_types[] = {
+ "CS_ASCII",
+ "CS_ISO_8859_1",
+ "CS_ISO_8859_2",
+ "CS_ISO_8859_3",
+ "CS_ISO_8859_4",
+ "CS_ISO_8859_5",
+ "CS_ISO_8859_6",
+ "CS_ISO_8859_7",
+ "CS_ISO_8859_8",
+ "CS_ISO_8859_9",
+ "CS_UNICODE"
+};
+
+static hashbin_t *iriap = NULL;
+static __u32 service_handle;
+
+extern char *lmp_reasons[];
static struct iriap_cb *iriap_open( __u8 slsap, int mode);
static void __iriap_close( struct iriap_cb *self);
-static void iriap_disconnect_indication( void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *skb);
+static void iriap_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason, struct sk_buff *skb);
+static void iriap_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *skb);
+static int iriap_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
/*
* Function iriap_init (void)
@@ -54,16 +79,15 @@ static void iriap_disconnect_indication( void *instance, void *sap,
*/
__initfunc(int iriap_init(void))
{
+ __u16 hints;
struct ias_object *obj;
- DEBUG( 4, "--> iriap_init\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
/* Allocate master array */
iriap = hashbin_new( HB_LOCAL);
- if ( iriap == NULL) {
- printk( KERN_WARNING "IrIAP: Can't allocate iriap hashbin!\n");
+ if ( iriap == NULL)
return -ENOMEM;
- }
objects = hashbin_new( HB_LOCAL);
if ( objects == NULL) {
@@ -75,8 +99,9 @@ __initfunc(int iriap_init(void))
/*
* Register some default services for IrLMP
*/
- irlmp_register_layer( S_COMPUTER, SERVER | CLIENT, FALSE, NULL);
- irlmp_register_layer( S_PNP, SERVER, FALSE, NULL);
+ hints = irlmp_service_to_hint(S_COMPUTER);
+ hints |= irlmp_service_to_hint(S_PNP);
+ service_handle = irlmp_register_service(hints);
/*
* Register the Device object with LM-IAS
@@ -91,8 +116,6 @@ __initfunc(int iriap_init(void))
*/
iriap_open( LSAP_IAS, IAS_SERVER);
- DEBUG( 4, "iriap_init -->\n");
-
return 0;
}
@@ -104,11 +127,10 @@ __initfunc(int iriap_init(void))
*/
void iriap_cleanup(void)
{
- irlmp_unregister_layer( S_COMPUTER, SERVER | CLIENT);
- irlmp_unregister_layer( S_PNP, SERVER);
+ irlmp_unregister_service(service_handle);
- hashbin_delete( iriap, (FREE_FUNC) __iriap_close);
- hashbin_delete( objects, (FREE_FUNC) __irias_delete_object);
+ hashbin_delete(iriap, (FREE_FUNC) __iriap_close);
+ hashbin_delete(objects, (FREE_FUNC) __irias_delete_object);
}
/*
@@ -151,7 +173,8 @@ struct iriap_cb *iriap_open( __u8 slsap_sel, int mode)
		DEBUG( 0, "iriap_open: Unable to allocate LSAP!\n");
return NULL;
}
- DEBUG( 4, "iriap_register: source LSAP sel=%02x\n", slsap_sel);
+ slsap_sel = lsap->slsap_sel;
+ DEBUG( 4, __FUNCTION__ "(), source LSAP sel=%02x\n", slsap_sel);
self->magic = IAS_MAGIC;
self->lsap = lsap;
@@ -178,6 +201,8 @@ struct iriap_cb *iriap_open( __u8 slsap_sel, int mode)
*/
static void __iriap_close( struct iriap_cb *self)
{
+ DEBUG( 4, __FUNCTION__ "()\n");
+
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
@@ -195,7 +220,9 @@ static void __iriap_close( struct iriap_cb *self)
*/
void iriap_close( struct iriap_cb *self)
{
- DEBUG( 4, "iriap_close()\n");
+ struct iriap_cb *entry;
+
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
@@ -205,7 +232,9 @@ void iriap_close( struct iriap_cb *self)
self->lsap = NULL;
}
- hashbin_remove( iriap, self->slsap_sel, NULL);
+ entry = (struct iriap_cb *) hashbin_remove( iriap, self->slsap_sel,
+ NULL);
+ ASSERT( entry == self, return;);
__iriap_close( self);
}
@@ -216,12 +245,13 @@ void iriap_close( struct iriap_cb *self)
 * Got disconnect, so clean up everything associated with this connection
*
*/
-void iriap_disconnect_indication( void *instance, void *sap, LM_REASON reason,
- struct sk_buff *userdata)
+static void iriap_disconnect_indication( void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *userdata)
{
struct iriap_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "(), reason=%s\n", lmp_reasons[reason]);
self = (struct iriap_cb *) instance;
@@ -235,15 +265,19 @@ void iriap_disconnect_indication( void *instance, void *sap, LM_REASON reason,
if ( self->mode == IAS_CLIENT) {
DEBUG( 4, __FUNCTION__ "(), disconnect as client\n");
- /* Inform service user */
- if ( self->confirm)
- self->confirm( 0, NULL, self->priv);
-
+ /*
+ * Inform service user that the request failed by sending
+ * it a NULL value.
+ */
+ if (self->confirm)
+ self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
+
+
iriap_do_client_event( self, IAP_LM_DISCONNECT_INDICATION,
NULL);
/* Close instance only if client */
iriap_close( self);
-
+
} else {
DEBUG( 4, __FUNCTION__ "(), disconnect as server\n");
iriap_do_server_event( self, IAP_LM_DISCONNECT_INDICATION,
@@ -265,15 +299,15 @@ void iriap_disconnect_request( struct iriap_cb *self)
{
struct sk_buff *skb;
- DEBUG( 4, "iriap_disconnect_request()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
skb = dev_alloc_skb( 64);
if (skb == NULL) {
- DEBUG( 0,"iriap_getvaluebyclass: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ DEBUG( 0, __FUNCTION__
+ "(), Could not allocate an sk_buff of length %d\n", 64);
return;
}
@@ -316,9 +350,9 @@ void iriap_getvalue(void)
 * Retrieve all values from attribute in all objects with given class
* name
*/
-void iriap_getvaluebyclass_request( __u32 daddr, char *name, char *attr,
- CONFIRM_CALLBACK callback,
- void *priv)
+void iriap_getvaluebyclass_request(char *name, char *attr,
+ __u32 saddr, __u32 daddr,
+ CONFIRM_CALLBACK callback, void *priv)
{
struct sk_buff *skb;
struct iriap_cb *self;
@@ -326,48 +360,47 @@ void iriap_getvaluebyclass_request( __u32 daddr, char *name, char *attr,
int name_len, attr_len;
__u8 slsap = LSAP_ANY; /* Source LSAP to use */
- DEBUG( 4, "iriap_getvaluebyclass_request()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- self = iriap_open( slsap, IAS_CLIENT);
- if ( self == NULL)
+ self = iriap_open(slsap, IAS_CLIENT);
+ if (!self)
return;
self->mode = IAS_CLIENT;
self->confirm = callback;
self->priv = priv;
+
self->daddr = daddr;
+ self->saddr = saddr;
/*
* Save operation, so we know what the later indication is about
*/
self->operation = GET_VALUE_BY_CLASS;
- /* Give ourselves 7 secs to finish this operation */
- iriap_start_watchdog_timer( self, 700);
+ /* Give ourselves 10 secs to finish this operation */
+ iriap_start_watchdog_timer(self, 10*HZ);
skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0,"iriap_getvaluebyclass: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ if (!skb)
return;
- }
- name_len = strlen( name);
- attr_len = strlen( attr);
+ name_len = strlen(name);
+ attr_len = strlen(attr);
/* Reserve space for MUX and LAP header */
- skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
- skb_put( skb, 3+name_len+attr_len);
+ skb_reserve(skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_put(skb, 3+name_len+attr_len);
frame = skb->data;
/* Build frame */
- frame[0] = LST | GET_VALUE_BY_CLASS;
+ frame[0] = IAP_LST | GET_VALUE_BY_CLASS;
frame[1] = name_len; /* Insert length of name */
- memcpy( frame+2, name, name_len); /* Insert name */
+ memcpy(frame+2, name, name_len); /* Insert name */
frame[2+name_len] = attr_len; /* Insert length of attr */
- memcpy( frame+3+name_len, attr, attr_len); /* Insert attr */
+ memcpy(frame+3+name_len, attr, attr_len); /* Insert attr */
- iriap_do_client_event( self, IAP_CALL_REQUEST_GVBC, skb);
+ iriap_do_client_event(self, IAP_CALL_REQUEST_GVBC, skb);
}
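The request frame built above is a small fixed layout: the GetValueByClass opcode with the Last bit set, a length-prefixed class name, then a length-prefixed attribute name; the same hunk also moves the watchdog timeout to 10*HZ, i.e. ten seconds expressed in jiffies. A minimal sketch of that layout, kept separate from the patch (iap_build_gvbc() is a hypothetical helper, and the caller is assumed to supply a buffer large enough for both strings):

/* Sketch only -- mirrors the frame layout used by
 * iriap_getvaluebyclass_request() above. */
static int iap_build_gvbc(__u8 *frame, const char *name, const char *attr)
{
	int name_len = strlen(name);
	int attr_len = strlen(attr);

	frame[0] = IAP_LST | GET_VALUE_BY_CLASS; /* opcode, Last frame bit */
	frame[1] = name_len;                     /* class name length */
	memcpy(frame+2, name, name_len);         /* class name */
	frame[2+name_len] = attr_len;            /* attribute name length */
	memcpy(frame+3+name_len, attr, attr_len);/* attribute name */

	return 3 + name_len + attr_len;          /* bytes written */
}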
/*
@@ -377,161 +410,175 @@ void iriap_getvaluebyclass_request( __u32 daddr, char *name, char *attr,
* to service user.
*
*/
-void iriap_getvaluebyclass_confirm( struct iriap_cb *self,
- struct sk_buff *skb)
+void iriap_getvaluebyclass_confirm(struct iriap_cb *self, struct sk_buff *skb)
{
struct ias_value *value;
+ int n;
+ int charset;
+ __u32 value_len;
+ __u32 tmp_cpu32;
__u16 obj_id;
- int len;
+ __u16 len;
__u8 type;
- int value_len;
__u8 *fp;
- int n;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IAS_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IAS_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
/* Initialize variables */
fp = skb->data;
n = 2;
/* Get length, MSB first */
- len = ntohs( *(__u16 *)( fp+n)); n += 2;
+ len = be16_to_cpu(get_unaligned((__u16 *)(fp+n))); n += 2;
- DEBUG( 4, "iriap_getvaluebyclass_confirm: len=%d\n", len);
+ DEBUG(4, __FUNCTION__ "(), len=%d\n", len);
/* Get object ID, MSB first */
- obj_id = ntohs( *(__u16 *)( fp+n)); n += 2;
+ obj_id = be16_to_cpu(get_unaligned((__u16 *)(fp+n))); n += 2;
+/* memcpy(&obj_id, fp+n, 2); n += 2; */
+/* be16_to_cpus(&obj_id); */
type = fp[n++];
DEBUG( 4, __FUNCTION__ "(), Value type = %d\n", type);
-
switch( type) {
case IAS_INTEGER:
- value = irias_new_integer_value( ntohl(*(__u32 *)(fp+n)));
- /*
- * Legal values restricted to 0x01-0x6f, page 15 irttp
- */
- DEBUG( 4, "iriap_getvaluebyclass_confirm: lsap=%d\n",
- value->t.integer);
+ memcpy(&tmp_cpu32, fp+n, 4); n += 4;
+ be32_to_cpus(&tmp_cpu32);
+ value = irias_new_integer_value(tmp_cpu32);
+
+ /* Legal values restricted to 0x01-0x6f, page 15 irttp */
+ DEBUG( 4, __FUNCTION__ "(), lsap=%d\n", value->t.integer);
break;
case IAS_STRING:
- /* FIXME: check len of string, and if string is/should be
- * null terminated? */
- ASSERT( fp[n++] == 0, return;); /* ASCII only! */
+ charset = fp[n++];
+
+ switch(charset) {
+ case CS_ASCII:
+ break;
+/* case CS_ISO_8859_1: */
+/* case CS_ISO_8859_2: */
+/* case CS_ISO_8859_3: */
+/* case CS_ISO_8859_4: */
+/* case CS_ISO_8859_5: */
+/* case CS_ISO_8859_6: */
+/* case CS_ISO_8859_7: */
+/* case CS_ISO_8859_8: */
+/* case CS_ISO_8859_9: */
+/* case CS_UNICODE: */
+ default:
+ DEBUG(0, __FUNCTION__"(), charset %s, not supported\n",
+ ias_charset_types[charset]);
+ return;
+ /* break; */
+ }
value_len = fp[n++];
- DEBUG( 0, __FUNCTION__ "(), strlen=%d\n", value_len);
+ DEBUG(4, __FUNCTION__ "(), strlen=%d\n", value_len);
ASSERT( value_len < 64, return;);
- DEBUG( 0, "Got string %s\n", fp+n);
-
- value = irias_new_string_value( fp+n);
-
+ /* Make sure the string is null-terminated */
+ fp[n+value_len] = 0x00;
+
+ DEBUG(4, "Got string %s\n", fp+n);
+ value = irias_new_string_value(fp+n);
break;
case IAS_OCT_SEQ:
- value_len = ntohs( *(__u16 *)( fp+n)); n += 2;
+ value_len = be16_to_cpu(get_unaligned((__u16 *)(fp+n)));
+ n += 2;
/* FIXME:should be 1024, but.... */
- DEBUG( 0, __FUNCTION__ "():octet sequence:len=%d\n",
- value_len);
+ DEBUG(0, __FUNCTION__ "():octet sequence:len=%d\n", value_len);
ASSERT(value_len <= 55, return;);
- value = irias_new_octseq_value( fp+n, value_len);
-
+ value = irias_new_octseq_value(fp+n, value_len);
break;
default:
value = &missing;
break;
}
- if ( self->confirm)
- self->confirm(obj_id, value, self->priv);
-
- /*
- * Finished, close connection!
- */
- iriap_disconnect_request( self);
+ /* Finished, close connection! */
+ iriap_disconnect_request(self);
+
+ if (self->confirm)
+ self->confirm(IAS_SUCCESS, obj_id, value, self->priv);
}
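The parsing changes above all address alignment: offsets inside an IAP frame are not naturally aligned, so the direct casts like ntohs(*(__u16 *)(fp+n)) are replaced with get_unaligned() plus be16_to_cpu(), or with a memcpy() into a temporary followed by be32_to_cpus(); the string case additionally writes an explicit NUL terminator before handing the buffer to irias_new_string_value(). A sketch of the two read idioms, assuming only the same byteorder/unaligned helpers the code above already uses:

/* Sketch: fetch big-endian fields at arbitrary (possibly odd) offsets. */
static inline __u16 iap_get_be16(__u8 *fp, int n)
{
	return be16_to_cpu(get_unaligned((__u16 *)(fp + n)));
}

static inline __u32 iap_get_be32(__u8 *fp, int n)
{
	__u32 tmp;

	memcpy(&tmp, fp + n, 4);   /* avoid an unaligned 32-bit load */
	be32_to_cpus(&tmp);        /* convert in place to host order */
	return tmp;
}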
/*
* Function iriap_getvaluebyclass_response ()
*
- * Send answer to getvaluebyclass_indication back to peer LM-IAS
+ * Send answer back to remote LM-IAS
*
*/
-void iriap_getvaluebyclass_response( struct iriap_cb *self,
- __u16 obj_id,
- __u8 ret_code,
- struct ias_value *value)
+void iriap_getvaluebyclass_response(struct iriap_cb *self, __u16 obj_id,
+ __u8 ret_code, struct ias_value *value)
{
struct sk_buff *skb;
- __u8 *fp;
int n;
+	__u32 tmp_be32; __u16 tmp_be16;
+ __u8 *fp;
- DEBUG( 4, "iriap_getvaluebyclass_response()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
+ ASSERT( value != NULL, return;);
+ ASSERT( value->len <= 1024, return;);
/* Initialize variables */
n = 0;
/*
- * FIXME: adjust the size of the response after the length of the
- * value
+ * We must adjust the size of the response after the length of the
+ * value. We add 9 bytes because of the 6 bytes for the frame and
+ * max 3 bytes for the value coding.
*/
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__ "(),"
- "Could not allocate an skb of length %d\n", 64);
+ skb = dev_alloc_skb(value->len + LMP_HEADER + LAP_HEADER + 9);
+ if (!skb)
return;
- }
/* Reserve space for MUX and LAP header */
- skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ skb_reserve( skb, LMP_HEADER+LAP_HEADER);
skb_put( skb, 6);
fp = skb->data;
/* Build frame */
- fp[n++] = GET_VALUE_BY_CLASS | LST;
+ fp[n++] = GET_VALUE_BY_CLASS | IAP_LST;
fp[n++] = ret_code;
/* Insert list length (MSB first) */
- *((__u16 *) (fp+n)) = __constant_htons( 0x0001); n += 2;
+ tmp_be16 = __constant_htons(0x0001);
+ memcpy(fp+n, &tmp_be16, 2); n += 2;
/* Insert object identifier ( MSB first) */
- *((__u16 *) (fp+n)) = htons( obj_id); n += 2;
+ tmp_be16 = cpu_to_be16(obj_id);
+ memcpy(fp+n, &tmp_be16, 2); n += 2;
- switch( value->type) {
+ switch(value->type) {
case IAS_STRING:
- skb_put( skb, 3 + value->len);
+ skb_put(skb, 3 + value->len);
fp[n++] = value->type;
fp[n++] = 0; /* ASCII */
fp[n++] = (__u8) value->len;
- memcpy( fp+n, value->t.string, value->len); n+=value->len;
+ memcpy(fp+n, value->t.string, value->len); n+=value->len;
break;
case IAS_INTEGER:
- skb_put( skb, 5);
+ skb_put(skb, 5);
fp[n++] = value->type;
- *((__u32 *)(fp+n)) = htonl(value->t.integer); n+=4;
+ tmp_be32 = cpu_to_be32(value->t.integer);
+ memcpy(fp+n, &tmp_be32, 4); n += 4;
break;
case IAS_OCT_SEQ:
-
- /* FIXME:
- * we can send only 55 octets at this time.
- * we should be able to send 1024 octets. TH
- */
-
- ASSERT(value->len <= 55, return ;);
- skb_put( skb, 3 + value->len);
+ skb_put(skb, 3 + value->len);
fp[n++] = value->type;
- *((__u16 *)(fp+n)) = htons(value->len); n+=2;
+
+ tmp_be16 = cpu_to_be16(value->len);
+ memcpy(fp+n, &tmp_be16, 2); n += 2;
memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len;
-
break;
case IAS_MISSING:
DEBUG( 3, __FUNCTION__ ": sending IAS_MISSING\n");
@@ -540,11 +587,10 @@ void iriap_getvaluebyclass_response( struct iriap_cb *self,
break;
default:
- DEBUG( 0, "iriap_getvaluebyclass_response: "
- "type not implemented!\n");
+ DEBUG(0, __FUNCTION__ "(), type not implemented!\n");
break;
}
- iriap_do_r_connect_event( self, IAP_CALL_RESPONSE, skb);
+ iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, skb);
}
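The response encoder has the mirror-image problem: it must write 16- and 32-bit fields at odd offsets into the frame. Instead of storing through a cast pointer (which can fault on strict-alignment CPUs), the value is converted in a local temporary and byte-copied into place. The write-side idiom from the function above, sketched as two small helpers (hypothetical names, same calls as the patch):

/* Sketch of the write side used in iriap_getvaluebyclass_response(). */
static inline int iap_put_be16(__u8 *fp, int n, __u16 val)
{
	__u16 tmp = cpu_to_be16(val);

	memcpy(fp + n, &tmp, 2);
	return n + 2;
}

static inline int iap_put_be32(__u8 *fp, int n, __u32 val)
{
	__u32 tmp = cpu_to_be32(val);

	memcpy(fp + n, &tmp, 4);
	return n + 4;
}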
/*
@@ -553,8 +599,8 @@ void iriap_getvaluebyclass_response( struct iriap_cb *self,
* getvaluebyclass is requested from peer LM-IAS
*
*/
-void iriap_getvaluebyclass_indication( struct iriap_cb *self,
- struct sk_buff *skb)
+void iriap_getvaluebyclass_indication(struct iriap_cb *self,
+ struct sk_buff *skb)
{
__u8 *fp;
int n;
@@ -562,11 +608,10 @@ void iriap_getvaluebyclass_indication( struct iriap_cb *self,
int attr_len;
char name[64];
char attr[64];
- char both[128];
struct ias_object *obj;
struct ias_attrib *attrib;
- DEBUG( 4, "iriap_getvaluebyclass_indication()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
@@ -588,30 +633,26 @@ void iriap_getvaluebyclass_indication( struct iriap_cb *self,
/*
* Now, do some advanced parsing! :-)
*/
- DEBUG( 9, "LM-IAS: Looking up %s: %s\n", name, attr);
-
- sprintf( both, "%s:%s", name, attr);
- DEBUG( 0, "LM-IAS: looking for %s\n", both);
-
- obj = irias_find_object( name);
-
+ DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr);
+ obj = irias_find_object(name);
+
if ( obj == NULL) {
DEBUG( 0, "LM-IAS: Object not found\n");
iriap_getvaluebyclass_response( self, 0x1235,
IAS_CLASS_UNKNOWN, &missing);
return;
}
- DEBUG( 0, "LM-IAS: found %s, id=%d\n", obj->name, obj->id);
+ DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id);
attrib = irias_find_attrib( obj, attr);
if ( attrib == NULL) {
DEBUG( 0, "LM-IAS: Attribute %s not found\n", attr);
- iriap_getvaluebyclass_response( self, obj->id,
- IAS_ATTRIB_UNKNOWN, &missing);
+ iriap_getvaluebyclass_response(self, obj->id,
+ IAS_ATTRIB_UNKNOWN, &missing);
return;
}
- DEBUG( 0, "LM-IAS: found %s\n", attrib->name);
+ DEBUG(4, "LM-IAS: found %s\n", attrib->name);
/*
* We have a match; send the value.
@@ -633,17 +674,14 @@ void iriap_send_ack( struct iriap_cb *self)
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 6, "iriap_send_ack()\n");
+ DEBUG( 6, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, "iriap_send_ack: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ skb = dev_alloc_skb( 64);
+ if (!skb)
return;
- }
/* Reserve space for MUX and LAP header */
skb_reserve( skb, 4);
@@ -651,7 +689,7 @@ void iriap_send_ack( struct iriap_cb *self)
frame = skb->data;
/* Build frame */
- frame[0] = LST | self->operation;
+ frame[0] = IAP_LST | self->operation;
}
/*
@@ -660,22 +698,22 @@ void iriap_send_ack( struct iriap_cb *self)
* LSAP connection confirmed!
*
*/
-void iriap_connect_confirm( void *instance, void *sap, struct qos_info *qos,
- int max_sdu_size, struct sk_buff *userdata)
+void iriap_connect_confirm(void *instance, void *sap, struct qos_info *qos,
+ __u32 max_sdu_size, struct sk_buff *userdata)
{
struct iriap_cb *self;
- self = ( struct iriap_cb *) instance;
+ self = (struct iriap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IAS_MAGIC, return;);
- ASSERT( userdata != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IAS_MAGIC, return;);
+ ASSERT(userdata != NULL, return;);
- DEBUG( 4, "iriap_connect_confirm()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- del_timer( &self->watchdog_timer);
+ /* del_timer( &self->watchdog_timer); */
- iriap_do_client_event( self, IAP_LM_CONNECT_CONFIRM, userdata);
+ iriap_do_client_event(self, IAP_LM_CONNECT_CONFIRM, userdata);
}
/*
@@ -684,13 +722,13 @@ void iriap_connect_confirm( void *instance, void *sap, struct qos_info *qos,
* Remote LM-IAS is requesting connection
*
*/
-void iriap_connect_indication( void *instance, void *sap,
- struct qos_info *qos, int max_sdu_size,
- struct sk_buff *userdata)
+static void iriap_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *userdata)
{
struct iriap_cb *self;
- DEBUG( 4, "iriap_connect_indication()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
self = ( struct iriap_cb *) instance;
@@ -707,58 +745,74 @@ void iriap_connect_indication( void *instance, void *sap,
* Receives data from connection identified by handle from IrLMP
*
*/
-void iriap_data_indication( void *instance, void *sap, struct sk_buff *skb)
+static int iriap_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
{
struct iriap_cb *self;
__u8 *frame;
__u8 opcode;
- DEBUG( 4, "iriap_data_indication()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
- self = ( struct iriap_cb *) instance;
+ self = (struct iriap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IAS_MAGIC, return;);
+ ASSERT(self != NULL, return 0;);
+ ASSERT(self->magic == IAS_MAGIC, return 0;);
- ASSERT( skb != NULL, return;);
+ ASSERT(skb != NULL, return 0;);
frame = skb->data;
- if ( self->mode == IAS_SERVER) {
+ if (self->mode == IAS_SERVER) {
/* Call server */
- DEBUG( 4, __FUNCTION__ "(), Calling server!\n");
+ DEBUG(4, __FUNCTION__ "(), Calling server!\n");
iriap_do_r_connect_event( self, IAP_RECV_F_LST, skb);
- return;
+ return 0;
}
opcode = frame[0];
if ( ~opcode & 0x80) {
printk( KERN_ERR "IrIAS multiframe commands or results is "
"not implemented yet!\n");
- return;
+ return 0;
}
- if ( ~opcode &0x40) {
- DEBUG( 4, "Got ack frame!\n");
+ if (~opcode & IAP_ACK) {
+ DEBUG(2, __FUNCTION__ "() Got ack frame!\n");
/* return; */
}
- opcode &= 0x7f; /* Mask away LST bit */
+ opcode &= ~IAP_LST; /* Mask away LST bit */
- switch( opcode) {
+ switch(opcode) {
case GET_INFO_BASE:
DEBUG( 0, "IrLMP GetInfoBaseDetails not implemented!\n");
break;
case GET_VALUE_BY_CLASS:
DEBUG( 4,"IrLMP GetValueByClass\n");
- if ( frame[1] == 0x01) {
- printk( KERN_WARNING "IrIAP No such class!\n");
- } else if ( frame[1] == 0x02) {
- printk( KERN_WARNING
- "IrIAP No such attribute!\n");
- } else {
- iriap_getvaluebyclass_confirm( self, skb);
+ switch(frame[1]) {
+ case IAS_SUCCESS:
+ iriap_getvaluebyclass_confirm(self, skb);
+ break;
+ case IAS_CLASS_UNKNOWN:
+ printk(KERN_WARNING "IrIAP No such class!\n");
+ /* Finished, close connection! */
+ iriap_disconnect_request(self);
+
+ if (self->confirm)
+ self->confirm(IAS_CLASS_UNKNOWN, 0, NULL,
+ self->priv);
+ break;
+ case IAS_ATTRIB_UNKNOWN:
+ printk(KERN_WARNING "IrIAP No such attribute!\n");
+ /* Finished, close connection! */
+ iriap_disconnect_request(self);
+
+ if (self->confirm)
+				self->confirm(IAS_ATTRIB_UNKNOWN, 0, NULL,
+ self->priv);
+ break;
}
iriap_do_call_event( self, IAP_RECV_F_LST, skb);
@@ -769,10 +823,10 @@ void iriap_data_indication( void *instance, void *sap, struct sk_buff *skb)
iriap_close( self);
break;
default:
- DEBUG( 0, "iriap_data_indication: Unknown op-code: %02x\n",
- opcode);
+ DEBUG(0, __FUNCTION__ "(), Unknown op-code: %02x\n", opcode);
break;
}
+ return 0;
}
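The client's data-indication path now dispatches on the return code in frame[1] and reports the outcome through the confirm callback, whose first argument carries the result (IAS_SUCCESS on a usable answer, IAS_CLASS_UNKNOWN or IAS_ATTRIB_UNKNOWN on a negative reply, IAS_DISCONNECT from the disconnect path). A hypothetical service-user callback, only to show the shape of that handshake; the exact parameter types come from the CONFIRM_CALLBACK typedef in the IrIAP headers and are assumed here:

/* Hypothetical callback -- argument types inferred from the confirm()
 * call sites above; check the CONFIRM_CALLBACK typedef before reuse. */
static void example_gvbc_confirm(int result, __u16 obj_id,
				 struct ias_value *value, void *priv)
{
	if (result != IAS_SUCCESS) {
		/* Query failed or the LSAP went away; value is NULL here. */
		return;
	}
	/* obj_id and value are only meaningful on success. */
}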
/*
@@ -786,7 +840,7 @@ void iriap_call_indication( struct iriap_cb *self, struct sk_buff *skb)
__u8 *fp;
__u8 opcode;
- DEBUG( 4, "iriap_call_indication()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
@@ -812,16 +866,20 @@ void iriap_call_indication( struct iriap_cb *self, struct sk_buff *skb)
}
}
+/*
+ * Function iriap_watchdog_timer_expired (data)
+ *
+ *    Watchdog timer expired; give up and close this IrIAP instance
+ *
+ */
void iriap_watchdog_timer_expired( unsigned long data)
{
struct iriap_cb *self = ( struct iriap_cb *) data;
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IAS_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IAS_MAGIC, return;);
- DEBUG( 0, __FUNCTION__ "() Timeout! closing myself!\n");
+ DEBUG(0, __FUNCTION__ "() Timeout! closing myself!\n");
iriap_close( self);
}
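The watchdog above is the safety net for the whole client exchange: it is armed before the query starts and simply reclaims the instance if neither a confirm nor a disconnect ever arrives. The idiom in isolation, with hypothetical example_ names (the real expiry handler is iriap_watchdog_timer_expired() just above):

/* Watchdog idiom used by this file, sketched separately from the patch. */
static void example_watchdog_expired(unsigned long data)
{
	struct iriap_cb *self = (struct iriap_cb *) data;

	ASSERT(self != NULL, return;);
	iriap_close(self);		/* give up, reclaim the instance */
}

static void example_start_query(struct iriap_cb *self)
{
	/* Arm before kicking off the exchange: 10*HZ is ten seconds in
	 * jiffies, so a stuck query cleans itself up automatically. */
	iriap_start_watchdog_timer(self, 10*HZ);
	/* ...build and send the request, then wait for confirm/disconnect... */
}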
@@ -834,23 +892,7 @@ static char *ias_value_types[] = {
"IAS_STRING"
};
-/* FIXME: This one should go in irlmp.c */
-static char *ias_charset_types[] = {
- "CS_ASCII",
- "CS_ISO_8859_1",
- "CS_ISO_8859_2",
- "CS_ISO_8859_3",
- "CS_ISO_8859_4",
- "CS_ISO_8859_5",
- "CS_ISO_8859_6",
- "CS_ISO_8859_7",
- "CS_ISO_8859_8",
- "CS_ISO_8859_9",
- "CS_UNICODE"
-};
-
-int irias_proc_read( char *buf, char **start, off_t offset, int len,
- int unused)
+int irias_proc_read(char *buf, char **start, off_t offset, int len, int unused)
{
struct ias_object *obj;
struct ias_attrib *attrib;
@@ -907,8 +949,8 @@ int irias_proc_read( char *buf, char **start, off_t offset, int len,
}
len += sprintf( buf+len, "\n");
- attrib = ( struct ias_attrib *)
- hashbin_get_next( obj->attribs);
+ attrib = (struct ias_attrib *)
+ hashbin_get_next(obj->attribs);
}
obj = ( struct ias_object *) hashbin_get_next( objects);
}
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c
index c0ba0058e..ccba78ece 100644
--- a/net/irda/iriap_event.c
+++ b/net/irda/iriap_event.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
- * Modified at: Wed Dec 9 02:20:02 1998
+ * Modified at: Tue Jan 26 12:29:36 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
@@ -184,6 +184,8 @@ void iriap_do_r_connect_event( struct iriap_cb *self, IRIAP_EVENT event,
static void state_s_disconnect( struct iriap_cb *self, IRIAP_EVENT event,
struct sk_buff *skb)
{
+ int ret;
+
ASSERT( self != NULL, return;);
ASSERT( self->magic == IAS_MAGIC, return;);
@@ -191,13 +193,14 @@ static void state_s_disconnect( struct iriap_cb *self, IRIAP_EVENT event,
case IAP_CALL_REQUEST_GVBC:
iriap_next_client_state( self, S_CONNECTING);
self->skb = skb;
- irlmp_connect_request( self->lsap, LSAP_IAS, self->daddr,
- NULL, NULL);
+ ret = irlmp_connect_request( self->lsap, LSAP_IAS,
+ self->saddr, self->daddr,
+ NULL, NULL);
break;
case IAP_LM_DISCONNECT_INDICATION:
break;
default:
- DEBUG( 0, "state_s_disconnect: Unknown event %d\n", event);
+ DEBUG( 0, __FUNCTION__"(), Unknown event %d\n", event);
break;
}
}
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 8b5a29748..b2e6a5cba 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Oct 1 22:50:04 1998
- * Modified at: Tue Dec 15 09:19:43 1998
+ * Modified at: Mon Mar 22 13:22:35 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli, All Rights Reserved.
@@ -23,6 +23,8 @@
********************************************************************/
#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/irda.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
@@ -227,7 +229,6 @@ void irias_add_attrib( struct ias_object *obj, struct ias_attrib *attrib)
*
* Change the value of an objects attribute.
*
- * TODO: not tested yet!
*/
int irias_object_change_attribute( char *obj_name, char *attrib_name,
struct ias_value *new_value)
diff --git a/net/irda/irlan/Config.in b/net/irda/irlan/Config.in
index a9a7b84f2..7951d90ab 100644
--- a/net/irda/irlan/Config.in
+++ b/net/irda/irlan/Config.in
@@ -1,7 +1 @@
-
dep_tristate 'IrLAN protocol' CONFIG_IRLAN $CONFIG_IRDA
-
-if [ "$CONFIG_IRLAN" != "n" ]; then
- dep_tristate ' IrLAN client support' CONFIG_IRLAN_CLIENT $CONFIG_IRLAN
- dep_tristate ' IrLAN server support' CONFIG_IRLAN_SERVER $CONFIG_IRLAN
-fi
diff --git a/net/irda/irlan/Makefile b/net/irda/irlan/Makefile
index d5b37b9d8..6d4cb8433 100644
--- a/net/irda/irlan/Makefile
+++ b/net/irda/irlan/Makefile
@@ -9,44 +9,13 @@
MOD_LIST_NAME := IRDA_MODULES
O_TARGET := irlan.o
-O_OBJS := irlan_common.o irlan_eth.o irlan_event.o
+O_OBJS := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o
M_OBJS := $(O_TARGET)
MI_OBJS :=
OX_OBJS +=
-ifeq ($(CONFIG_IRLAN_CLIENT),y)
-O_OBJS += irlan_cli.o irlan_cli_event.o
-else
- ifeq ($(CONFIG_IRLAN_CLIENT),m)
-# MI_OBJS += irlan_cli.o irlan_cli_event.o
- M_OBJS += irlan_client.o
- endif
-endif
-
-ifeq ($(CONFIG_IRLAN_SERVER),y)
-O_OBJS += irlan_srv.o irlan_srv_event.o
-else
- ifeq ($(CONFIG_IRLAN_SERVER),m)
-# MI_OBJS += irlan_srv.o irlan_srv_event.o
- M_OBJS += irlan_server.o
- endif
-endif
-
-# Special rule to build the composite modules
-ifeq ($(CONFIG_IRLAN),m)
-irlan_client.o: irlan_cli.o irlan_cli_event.o
- $(LD) $(LD_RFLAG) -r -o $@ irlan_cli.o irlan_cli_event.o
-
-irlan_server.o: irlan_srv.o irlan_srv_event.o
- $(LD) $(LD_RFLAG) -r -o $@ irlan_srv.o irlan_srv_event.o
-endif
-
include $(TOPDIR)/Rules.make
tar:
tar -cvf /dev/f1 .
-
-
-
-
diff --git a/net/irda/irlan/irlan_cli.c b/net/irda/irlan/irlan_cli.c
deleted file mode 100644
index 700f25e9c..000000000
--- a/net/irda/irlan/irlan_cli.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_cli.c
- * Version: 0.8
- * Description: IrDA LAN Access Protocol Client
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Mon Jan 18 13:24:26 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
- * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
- * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
- *
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <net/arp.h>
-
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/byteorder.h>
-
-#include <net/irda/irda.h>
-#include <net/irda/irttp.h>
-#include <net/irda/irlmp.h>
-#include <net/irda/irias_object.h>
-#include <net/irda/iriap.h>
-#include <net/irda/timer.h>
-
-#include <net/irda/irlan_common.h>
-#include <net/irda/irlan_event.h>
-#include <net/irda/irlan_eth.h>
-#include <net/irda/irlan_cli.h>
-
-/*
- * Private functions
- */
-static struct irlan_cb *irlan_client_open( __u32 saddr, __u32 daddr);
-static void irlan_client_close( struct irlan_cb *self);
-
-
-static int irlan_client_eth_open( struct device *dev)
-{
- /* struct irlan_cb *self = (struct irlan_cb *) dev->priv; */
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( dev != NULL, return -1;);
-
- /* Ready to play! */
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
-
- MOD_INC_USE_COUNT;
-
- return 0;
-}
-
-/*
- * Function irlan_eth_close (dev)
- *
- * Stop the Client ether network device, his function will be called by
- * ifconfig down.
- */
-static int irlan_client_eth_close(struct device *dev)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( dev != NULL, return -1;);
-
- /* Stop device */
- dev->tbusy = 1;
- dev->start = 0;
-
- MOD_DEC_USE_COUNT;
-
- return 0;
-}
-
-/*
- * Function irlan_client_eth_init (dev)
- *
- *
- *
- */
-int irlan_client_eth_init( struct device *dev)
-{
- irlan_eth_init( dev);
-
- /* Overrride some functions */
- dev->open = irlan_client_eth_open;
- dev->stop = irlan_client_eth_close;
-
- return 0;
-}
-
-/*
- * Function irlan_client_init (dev)
- *
- * Allocates the master array. Called by modprobe().
- */
-__initfunc(int irlan_client_init( void))
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- /* Register with IrLMP as a service user */
- irlmp_register_layer( S_LAN, CLIENT, TRUE,
- irlan_discovery_indication);
-
- /* Do some fast discovery! */
- irlmp_discovery_request( 8);
-
- return 0;
-}
-
-/*
- * Function irlan_client_cleanup (void)
- *
- * Removes all instances of the IrLAN network device driver, and the
- * master array. Called by rmmod().
- *
- */
-void irlan_client_cleanup(void)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- irlmp_unregister_layer( S_LAN, CLIENT);
-}
-
-/*
- * Function irlan_client_open (void)
- *
- * This function allocates and opens a new instance of the IrLAN network
- * device driver.
- *
- */
-static struct irlan_cb *irlan_client_open( __u32 saddr, __u32 daddr)
-{
- struct irlan_cb *self;
- int result;
-
- DEBUG( 4, "IrLAN: irlan_client_open()\n");
-
- ASSERT( irlan != NULL, return NULL;);
-
- self = irlan_open();
- if ( self == NULL)
- return NULL;
-
- /* Use default name instead! */
- /* sprintf( self->ifname, "irlan%d", ); */
- self->dev.name = self->ifname;
- self->dev.priv = (void *) self;
- self->dev.next = NULL;
- self->dev.init = irlan_client_eth_init;
-
- self->saddr = saddr;
- self->daddr = daddr;
-
- /*
- * Insert ourself into the hashbin
- */
- hashbin_insert( irlan, (QUEUE *) self, saddr, NULL);
-
- if (( result = register_netdev( &self->dev)) != 0) {
- DEBUG( 0, "IrLAN, Register_netdev() failed!\n");
- return NULL;
- }
-
- irlan_next_state( self, IRLAN_IDLE);
-
- irlan_client_open_tsaps( self);
-
- return self;
-}
-
-/*
- * Function irlan_client_close (self)
- *
- *
- *
- */
-static void irlan_client_close( struct irlan_cb *self)
-{
- struct irlan_cb *entry;
-
- DEBUG( 0, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- entry = hashbin_remove( irlan, self->daddr, NULL);
-
- ASSERT( entry == self, return;);
-
- /* __irlan_close( self); */
-}
-
-/*
- * Function irlan_discovery_indication (daddr)
- *
- * Remote device with IrLAN server support discovered
- *
- */
-void irlan_discovery_indication( DISCOVERY *discovery)
-{
- struct irlan_cb *self;
- __u32 saddr, daddr;
-
- ASSERT( irlan != NULL, return;);
- ASSERT( discovery != NULL, return;);
-
- saddr = discovery->saddr;
- daddr = discovery->daddr;
-
- /*
- * Check if an instance is already dealing with this device
- * (saddr)
- */
- self = (struct irlan_cb *) hashbin_find( irlan, saddr, NULL);
- if ( self != NULL) {
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- DEBUG( 4, __FUNCTION__ "(), Found instance!\n");
- if ( self->state == IRLAN_IDLE) {
- /* daddr may have changed! */
- self->daddr = daddr;
-
- irlan_do_client_event( self,
- IRLAN_DISCOVERY_INDICATION,
- NULL);
- } else {
- DEBUG( 0, __FUNCTION__ "(), state=%s\n",
- irlan_state[ self->state]);
- /*
- * If we get here, it's obvious that the last
- * connection attempt has failed, so its best
- * to go back to idle!
- */
- irlan_do_client_event( self, IRLAN_LMP_DISCONNECT,
- NULL);
- }
- return;
- }
-
- /*
- * We have no instance for daddr, so time to start a new instance.
- */
- DEBUG( 0, __FUNCTION__ "(), Opening new instance for saddr=%#x\n",
- saddr);
-
- if (( self = irlan_client_open( saddr, daddr)) == NULL) {
- DEBUG( 0, "irlan_client_open failed!\n");
- return;
- }
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- DEBUG( 4, "Setting irlan_client state!\n");
- if ( self->state == IRLAN_IDLE) {
- irlan_do_client_event( self, IRLAN_DISCOVERY_INDICATION, NULL);
- } else {
- DEBUG( 0, __FUNCTION__ "(), Hmm, got here too!\n");
- }
-}
-
-/*
- * Function irlan_client_disconnect_indication (handle)
- *
- * Callback function for the IrTTP layer. Indicates a disconnection of
- * the specified connection (handle)
- */
-void irlan_client_disconnect_indication( void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *userdata)
-{
- struct irlan_cb *self;
- struct tsap_cb *tsap;
-
- self = ( struct irlan_cb *) instance;
- tsap = ( struct tsap_cb *) sap;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
- ASSERT( tsap != NULL, return;);
- ASSERT( tsap->magic == TTP_TSAP_MAGIC, return;);
-
- DEBUG( 4, __FUNCTION__ "(), reason=%d\n", reason);
-
- if ( tsap == self->tsap_data) {
- DEBUG( 4, "IrLAN, data channel disconnected by peer!\n");
- self->connected = FALSE;
- } else if ( tsap == self->tsap_ctrl) {
- DEBUG( 4, "IrLAN, control channel disconnected by peer!\n");
- } else {
- DEBUG( 0, "Error, disconnect on unknown handle!\n");
- }
-
- /* Stop IP from transmitting more packets */
- /* irlan_client_flow_indication( handle, FLOW_STOP, priv); */
-
- irlan_do_client_event( self, IRLAN_LMP_DISCONNECT, NULL);
-}
-
-/*
- * Function irlan_control_data_indication (handle, skb)
- *
- * This function gets the data that is received on the control channel
- *
- */
-void irlan_client_ctrl_data_indication( void *instance, void *sap,
- struct sk_buff *skb)
-{
- struct irlan_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct irlan_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- DEBUG( 4, "Got IRLAN_DATA_INDICATION!\n");
- irlan_do_client_event( self, IRLAN_DATA_INDICATION, skb);
-}
-
-/*
- * Function irlan_client_open_tsaps (self)
- *
- * Initialize callbacks and open IrTTP TSAPs
- *
- */
-void irlan_client_open_tsaps( struct irlan_cb *self)
-{
- /* struct irlan_frame frame; */
- struct notify_t notify_ctrl;
- struct notify_t notify_data;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- irda_notify_init( &notify_ctrl);
- irda_notify_init( &notify_data);
-
- /* Set up callbacks */
- notify_ctrl.data_indication = irlan_client_ctrl_data_indication;
- notify_ctrl.connect_confirm = irlan_client_connect_confirm;
- notify_ctrl.disconnect_indication = irlan_client_disconnect_indication;
- notify_ctrl.instance = self;
- strncpy( notify_ctrl.name, "IrLAN ctrl", NOTIFY_MAX_NAME);
-
- notify_data.data_indication = irlan_eth_rx;
- notify_data.udata_indication = irlan_eth_rx;
- notify_data.connect_confirm = irlan_client_connect_confirm;
- notify_data.flow_indication = irlan_eth_flow_indication;
- notify_data.disconnect_indication = irlan_client_disconnect_indication;
- notify_data.instance = self;
- strncpy( notify_data.name, "IrLAN data", NOTIFY_MAX_NAME);
-
- /* Create TSAP's */
- self->tsap_ctrl = irttp_open_tsap( LSAP_ANY,
- DEFAULT_INITIAL_CREDIT,
- &notify_ctrl);
- self->tsap_data = irttp_open_tsap( LSAP_ANY,
- DEFAULT_INITIAL_CREDIT,
- &notify_data);
-}
-
-/*
- * Function irlan_client_connect_confirm (handle, skb)
- *
- * Connection to peer IrLAN laye confirmed
- *
- */
-void irlan_client_connect_confirm( void *instance, void *sap,
- struct qos_info *qos, int max_sdu_size,
- struct sk_buff *skb)
-{
- struct irlan_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct irlan_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- /* TODO: we could set the MTU depending on the max_sdu_size */
-
- irlan_do_client_event( self, IRLAN_CONNECT_COMPLETE, NULL);
-}
-
-/*
- * Function irlan_client_reconnect_data_channel (self)
- *
- * Try to reconnect data channel (currently not used)
- *
- */
-void irlan_client_reconnect_data_channel( struct irlan_cb *self)
-{
- struct sk_buff *skb;
- __u8 *frame;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- skb = dev_alloc_skb( 128);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate an sk_buff of length %d\n", 64);
- return;
- }
-
- /* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
-
- frame = skb->data;
-
- frame[0] = CMD_RECONNECT_DATA_CHAN;
- frame[1] = 0x01;
- insert_array_param( skb, "RECONNECT_KEY",
- self->t.client.reconnect_key,
- self->t.client.key_len);
-
- irttp_data_request( self->tsap_ctrl, skb);
-}
-
-/*
- * Function irlan_client_extract_params (skb)
- *
- * Extract all parameters from received buffer, then feed them to
- * check_params for parsing
- *
- */
-void irlan_client_extract_params( struct irlan_cb *self,
- struct sk_buff *skb)
-{
- __u8 *frame;
- __u8 *ptr;
- int count;
- int ret;
- int val_len;
- int i;
-
- ASSERT( skb != NULL, return;);
-
- DEBUG( 4, __FUNCTION__ "() skb->len=%d\n", (int) skb->len);
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- if ( skb == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Got NULL skb!\n");
- return;
- }
- frame = skb->data;
-
- /*
- * Check return code and print it if not success
- */
- if ( frame[0])
- print_ret_code( frame[0]);
-
- /* How many parameters? */
- count = frame[1];
-
- DEBUG( 4, "Got %d parameters\n", count);
-
- ptr = frame+2;
-
- /* For all parameters */
- for ( i=0; i<count;i++) {
- ret = irlan_get_response_param( ptr, self->name, self->value,
- &val_len);
- if ( ret == -1) {
- DEBUG( 0, __FUNCTION__ "(), IrLAN, Error!\n");
- return;
- }
- ptr+=ret;
- check_response_param( self, self->name, self->value, val_len);
- }
-}
-
-/*
- * Function check_param (param, value)
- *
- * Check which parameter is received and update local variables
- *
- */
-void check_response_param( struct irlan_cb *self, char *param,
- char *value, int val_len)
-{
- int i;
- __u8 *bytes;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- /*
- * Media type
- */
- if ( strcmp( param, "MEDIA") == 0) {
- if ( strcmp( value, "802.3") == 0)
- self->media = MEDIA_802_3;
- else
- self->media = MEDIA_802_5;
- return;
- }
- /*
- * IRLAN version
- */
- if ( strcmp( param, "IRLAN_VER") == 0) {
- DEBUG( 4, "IrLAN version %d.%d\n",
- (__u8) value[0], (__u8) value[1]);
- return;
- }
- /*
- * Which remote TSAP to use for data channel
- */
- if ( strcmp( param, "DATA_CHAN") == 0) {
- self->dtsap_sel_data = value[0];
- DEBUG( 4, "Data TSAP = %02x\n", self->dtsap_sel_data);
- return;
- }
- /*
- * RECONNECT_KEY, in case the link goes down!
- */
- if ( strcmp( param, "RECONNECT_KEY") == 0) {
- DEBUG( 4, "Got reconnect key: ");
- /* for (i = 0; i < val_len; i++) */
-/* printk( "%02x", value[i]); */
- memcpy( self->t.client.reconnect_key, value, val_len);
- self->t.client.key_len = val_len;
- DEBUG( 4, "\n");
- }
- /*
- * FILTER_ENTRY, have we got the ethernet address?
- */
- if ( strcmp( param, "FILTER_ENTRY") == 0) {
- bytes = value;
- DEBUG( 4, "Ethernet address = %02x:%02x:%02x:%02x:%02x:%02x\n",
- bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
- bytes[5]);
- for (i = 0; i < 6; i++)
- self->dev.dev_addr[i] = bytes[i];
-#if 0
- /*
- * When we get a new MAC address do a gratuitous ARP. This is useful
- * if we have changed access points on the same subnet.
- */
- DEBUG( 4, "IrLAN: Sending gratuitous ARP\n");
- arp_send( ARPOP_REQUEST, ETH_P_ARP, self->dev.pa_addr,
- &self->dev, self->dev.pa_addr, NULL,
- self->dev.dev_addr, NULL);
-#endif
- }
-}
-
-/*
- * Function irlan_client_get_value_confirm (obj_id, value)
- *
- * Got results from previous GetValueByClass request
- *
- */
-void irlan_client_get_value_confirm( __u16 obj_id, struct ias_value *value,
- void *priv)
-{
- struct irlan_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( priv != NULL, return;);
-
- self = ( struct irlan_cb *) priv;
-
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- switch ( value->type) {
- case IAS_INTEGER:
- self->dtsap_sel_ctrl = value->t.integer;
-
- if ( value->t.integer != -1) {
- irlan_do_client_event( self, IRLAN_IAS_PROVIDER_AVAIL,
- NULL);
- return;
- }
- break;
- case IAS_STRING:
- DEBUG( 0, __FUNCTION__ "(), got string %s\n", value->t.string);
- break;
- case IAS_OCT_SEQ:
- DEBUG( 0, __FUNCTION__ "(), OCT_SEQ not implemented\n");
- break;
- case IAS_MISSING:
- DEBUG( 0, __FUNCTION__ "(), MISSING not implemented\n");
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), unknown type!\n");
- break;
- }
- irlan_do_client_event( self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL);
-}
-
-#ifdef MODULE
-
-MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
-MODULE_DESCRIPTION("The Linux IrDA LAN protocol");
-
-/*
- * Function init_module (void)
- *
- * Initialize the IrLAN module, this function is called by the
- * modprobe(1) program.
- */
-int init_module(void)
-{
- DEBUG( 4, __FUNCTION__ "(), irlan_client.c\n");
-
- irlan_client_init();
-
- return 0;
-}
-
-/*
- * Function cleanup_module (void)
- *
- * Remove the IrLAN module, this function is called by the rmmod(1)
- * program
- */
-void cleanup_module(void)
-{
- DEBUG( 4, "--> irlan, cleanup_module\n");
- /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
-
- /* Free some memory */
- irlan_client_cleanup();
-
- DEBUG( 4, "irlan, cleanup_module -->\n");
-}
-
-#endif /* MODULE */
-
-
-
-
-
-
-
-
-
diff --git a/net/irda/irlan/irlan_cli_event.c b/net/irda/irlan/irlan_cli_event.c
deleted file mode 100644
index 2f01a4b4a..000000000
--- a/net/irda/irlan/irlan_cli_event.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_cli_event.c
- * Version: 0.1
- * Description: IrLAN Client FSM (Finite State Machine)
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Wed Dec 9 02:36:49 1998
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#include <linux/skbuff.h>
-
-#include <net/irda/irda.h>
-#include <net/irda/irmod.h>
-#include <net/irda/iriap.h>
-#include <net/irda/irlmp.h>
-#include <net/irda/irttp.h>
-
-#include <net/irda/irlan_common.h>
-#include <net/irda/irlan_cli.h>
-#include <net/irda/irlan_event.h>
-
-static int irlan_client_state_idle ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_query( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_conn ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_info ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_media( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_open ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_wait ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_arb ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_data ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_close( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-static int irlan_client_state_sync ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb);
-
-static int (*state[])( struct irlan_cb *, IRLAN_EVENT event,
- struct sk_buff *) =
-{
- irlan_client_state_idle,
- irlan_client_state_query,
- irlan_client_state_conn,
- irlan_client_state_info,
- irlan_client_state_media,
- irlan_client_state_open,
- irlan_client_state_wait,
- irlan_client_state_arb,
- irlan_client_state_data,
- irlan_client_state_close,
- irlan_client_state_sync
-};
-
-void irlan_do_client_event( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- (*state[ self->state]) ( self, event, skb);
-}
-
-/*
- * Function irlan_client_state_idle (event, skb, info)
- *
- * IDLE, We are waiting for an indication that there is a provider
- * available.
- */
-static int irlan_client_state_idle( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 4, "irlan_client_state_idle()\n");
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRLAN_MAGIC, return -1;);
-
- switch( event) {
- case IRLAN_DISCOVERY_INDICATION:
- /* Get some values from peer IAS */
-#if 0
- iriap_getvaluebyclass_request( self->daddr,
- /* "PnP", "DeviceID", */
- "Device", "DeviceName",
- irlan_client_get_value_confirm,
- self);
-#endif
- iriap_getvaluebyclass_request( self->daddr,
- "IrLAN", "IrDA:TinyTP:LsapSel",
- irlan_client_get_value_confirm,
- self);
-
- irlan_next_state( self, IRLAN_QUERY);
- break;
- default:
- DEBUG( 4, __FUNCTION__ "(), Unknown event %d\n", event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
- return 0;
-}
-
-/*
- * Function irlan_client_state_query (event, skb, info)
- *
- * QUERY, We have queryed the remote IAS and is ready to connect
- * to provider, just waiting for the confirm.
- *
- */
-static int irlan_client_state_query( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRLAN_MAGIC, return -1;);
-
- switch( event) {
- case IRLAN_IAS_PROVIDER_AVAIL:
- ASSERT( self->dtsap_sel_ctrl != 0, return -1;);
-
- self->t.client.open_retries = 0;
-
- irttp_connect_request( self->tsap_ctrl, self->dtsap_sel_ctrl,
- self->daddr, NULL, IRLAN_MTU, NULL);
- irlan_next_state( self, IRLAN_CONN);
- break;
- case IRLAN_IAS_PROVIDER_NOT_AVAIL:
- DEBUG( 0, __FUNCTION__ "(), IAS_PROVIDER_NOT_AVAIL\n");
- irlan_next_state( self, IRLAN_IDLE);
- break;
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, __FUNCTION__"(), Unknown event %d\n", event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_client_state_conn (event, skb, info)
- *
- * CONN, We have connected to a provider but has not issued any
- * commands yet.
- *
- */
-static int irlan_client_state_conn( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_CONNECT_COMPLETE:
- /* Send getinfo cmd */
- irlan_get_provider_info( self);
- irlan_next_state( self, IRLAN_INFO);
- break;
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_client_state_info (self, event, skb, info)
- *
- * INFO, We have issued a GetInfo command and is awaiting a reply.
- */
-static int irlan_client_state_info( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_DATA_INDICATION:
- ASSERT( skb != NULL, return -1;);
-
- irlan_client_extract_params( self, skb);
-
- irlan_next_state( self, IRLAN_MEDIA);
-
- irlan_get_media_char( self);
- break;
-
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_client_state_media (self, event, skb, info)
- *
- * MEDIA, The irlan_client has issued a GetMedia command and is awaiting a
- * reply.
- *
- */
-static int irlan_client_state_media( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_DATA_INDICATION:
- irlan_client_extract_params( self, skb);
- irlan_open_data_channel( self);
- irlan_next_state( self, IRLAN_OPEN);
- break;
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, "irlan_client_state_media, Unknown event %d\n",
- event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_client_state_open (self, event, skb, info)
- *
- * OPEN, The irlan_client has issued a OpenData command and is awaiting a
- * reply
- *
- */
-static int irlan_client_state_open( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- struct qos_info qos;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_DATA_INDICATION:
- irlan_client_extract_params( self, skb);
-
- /*
- * Check if we have got the remote TSAP for data
- * communications
- */
- ASSERT( self->dtsap_sel_data != 0, return -1;);
-
- qos.link_disc_time.bits = 0x01; /* 3 secs */
-
- irttp_connect_request( self->tsap_data,
- self->dtsap_sel_data, self->daddr,
- NULL, IRLAN_MTU, NULL);
-
- irlan_next_state( self, IRLAN_DATA);
- break;
-
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n",
- event);
- break;
- }
-
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_client_state_wait (self, event, skb, info)
- *
- * WAIT, The irlan_client is waiting for the local provider to enter the
- * provider OPEN state.
- *
- */
-static int irlan_client_state_wait( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 4, "irlan_client_state_wait()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n",
- event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-static int irlan_client_state_arb( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 0, __FUNCTION__ "(), not implemented!\n");
-
- if ( skb) {
- dev_kfree_skb( skb);
- }
- return 0;
-}
-
-/*
- * Function irlan_client_state_data (self, event, skb, info)
- *
- * DATA, The data channel is connected, allowing data transfers between
- * the local and remote machines.
- *
- */
-static int irlan_client_state_data( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- struct irmanager_event mgr_event;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRLAN_MAGIC, return -1;);
-
- switch( event) {
- case IRLAN_CONNECT_COMPLETE:
- irlan_get_unicast_addr( self);
- irlan_open_unicast_addr( self);
- /* irlan_set_broadcast_filter( self, TRUE); */
-
- DEBUG( 4, "IrLAN, We are now connected!\n");
-
- /* irlan_next_state( LAN_DATA); */
- break;
- case IRLAN_DATA_INDICATION:
- irlan_client_extract_params( self, skb);
-
- /* irlan_client_flow_indication( self->data_tsap, FLOW_START, */
-/* self); */
-
- /* Make sure the code below only runs once */
- if ( !self->connected) {
- mgr_event.event = EVENT_IRLAN_START;
- sprintf( mgr_event.devname, "%s", self->ifname);
- irmanager_notify( &mgr_event);
-
- self->connected = TRUE;
- }
- break;
-
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- mgr_event.event = EVENT_IRLAN_STOP;
- sprintf( mgr_event.devname, "%s", self->ifname);
- irmanager_notify( &mgr_event);
-
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_client_state_close (self, event, skb, info)
- *
- *
- *
- */
-static int irlan_client_state_close( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 0, __FUNCTION__ "()\n");
-
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_client_state_sync (self, event, skb, info)
- *
- *
- *
- */
-static int irlan_client_state_sync( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb)
-{
- DEBUG( 0, __FUNCTION__ "()\n");
-
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
new file mode 100644
index 000000000..f2f8271cf
--- /dev/null
+++ b/net/irda/irlan/irlan_client.c
@@ -0,0 +1,589 @@
+/*********************************************************************
+ *
+ * Filename: irlan_client.c
+ * Version: 0.9
+ * Description: IrDA LAN Access Protocol (IrLAN) Client
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Thu Apr 22 23:03:55 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/iriap.h>
+#include <net/irda/timer.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_event.h>
+#include <net/irda/irlan_eth.h>
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_client.h>
+
+#undef CONFIG_IRLAN_GRATUITOUS_ARP
+
+static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *);
+static int irlan_client_ctrl_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ struct sk_buff *);
+static void irlan_check_response_param(struct irlan_cb *self, char *param,
+ char *value, int val_len);
+
+static void irlan_client_kick_timer_expired(unsigned long data)
+{
+ struct irlan_cb *self = (struct irlan_cb *) data;
+
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /*
+ * If we are in peer mode, the client may not have got the discovery
+ * indication it needs to make progress. If the client is still in
+	 * IDLE state, we must kick it too, but only if the provider is not IDLE
+ */
+ if ((self->access_type == ACCESS_PEER) &&
+ (self->client.state == IRLAN_IDLE) &&
+ (self->provider.state != IRLAN_IDLE)) {
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+ }
+}
+
+void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ irda_start_timer(&self->client.kick_timer, timeout,
+ (unsigned long) self,
+ irlan_client_kick_timer_expired);
+}
+
+/*
+ * Function irlan_client_wakeup (self, saddr, daddr)
+ *
+ * Wake up client
+ *
+ */
+void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr)
+{
+ struct irmanager_event mgr_event;
+
+ DEBUG(0, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if we are already awake */
+ if (self->client.state != IRLAN_IDLE)
+ return;
+
+ /* saddr may have changed! */
+ self->saddr = saddr;
+
+ /* Check if network device is up */
+ if (self->dev.start) {
+ /* Open TSAPs */
+ irlan_client_open_ctrl_tsap(self);
+ irlan_provider_open_ctrl_tsap(self);
+ irlan_open_data_tsap(self);
+
+ irlan_do_client_event(self, IRLAN_DISCOVERY_INDICATION, NULL);
+ } else if (self->notify_irmanager) {
+ /*
+ * Tell irmanager that the device can now be
+ * configured but only if the device was not taken
+ * down by the user
+ */
+ mgr_event.event = EVENT_IRLAN_START;
+ sprintf(mgr_event.devname, "%s", self->ifname);
+ irmanager_notify(&mgr_event);
+
+ /*
+ * We set this so that we only notify once, since if
+ * configuration of the network device fails, the user
+ * will have to sort it out first anyway. No need to
+ * try again.
+ */
+ self->notify_irmanager = FALSE;
+ }
+ /* Restart watchdog timer */
+ irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
+
+ /* Start kick timer */
+ irlan_client_start_kick_timer(self, 2*HZ);
+}
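In peer mode both ends run a client and a provider; if our provider answers first, our client may never see the discovery indication it is waiting for, so a two-second kick timer re-drives irlan_client_wakeup(). The timer idiom from above, in isolation (example_ names are hypothetical; irda_start_timer() takes the timer, a timeout in jiffies, an opaque cookie and the expiry handler, exactly as used earlier in this file):

/* Kick-timer sketch mirroring the code above. */
static void example_kick_expired(unsigned long data)
{
	struct irlan_cb *self = (struct irlan_cb *) data;

	if (self->client.state == IRLAN_IDLE)
		irlan_client_wakeup(self, self->saddr, self->daddr);
}

static void example_start_kick(struct irlan_cb *self)
{
	irda_start_timer(&self->client.kick_timer, 2*HZ,
			 (unsigned long) self, example_kick_expired);
}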
+
+/*
+ * Function irlan_discovery_indication (daddr)
+ *
+ * Remote device with IrLAN server support discovered
+ *
+ */
+void irlan_client_discovery_indication(discovery_t *discovery)
+{
+ struct irlan_cb *self, *entry;
+ __u32 saddr, daddr;
+
+ DEBUG(0, __FUNCTION__"()\n");
+
+ ASSERT(irlan != NULL, return;);
+ ASSERT(discovery != NULL, return;);
+
+ saddr = discovery->saddr;
+ daddr = discovery->daddr;
+
+ /*
+	 * Check if we are already dealing with this provider.
+ */
+ self = (struct irlan_cb *) hashbin_find(irlan, daddr, NULL);
+ if (self) {
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ DEBUG(2, __FUNCTION__ "(), Found instance!\n");
+
+ irlan_client_wakeup(self, saddr, daddr);
+
+ return;
+ }
+
+ /*
+ * We have no instance for daddr, so try and find an unused one
+ */
+ self = hashbin_find(irlan, DEV_ADDR_ANY, NULL);
+ if (self) {
+ DEBUG(0, __FUNCTION__ "(), Found instance with DEV_ADDR_ANY!\n");
+ /*
+ * Rehash instance, now we have a client (daddr) to serve.
+ */
+ entry = hashbin_remove(irlan, self->daddr, NULL);
+ ASSERT(entry == self, return;);
+
+ self->daddr = daddr;
+ self->saddr = saddr;
+
+ DEBUG(0, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
+ hashbin_insert(irlan, (QUEUE*) self, self->daddr, NULL);
+
+ /* Check if network device has been registered */
+ if (!self->netdev_registered)
+ irlan_register_netdev(self);
+
+ /* Restart watchdog timer */
+ irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
+ }
+}
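Client instances are now keyed by the provider's address: a freshly created one sits in the hashbin under DEV_ADDR_ANY until discovery tells us which provider it will serve, at which point it is pulled out and reinserted under the real daddr. That rehash, condensed into one hypothetical helper using the same hashbin calls as the code above:

/* Rehash sketch: move an instance from the wildcard key to the
 * provider's address once discovery has supplied it. */
static void example_rehash(struct irlan_cb *self, __u32 saddr, __u32 daddr)
{
	struct irlan_cb *entry;

	entry = hashbin_remove(irlan, self->daddr, NULL); /* old key, DEV_ADDR_ANY */
	ASSERT(entry == self, return;);

	self->daddr = daddr;
	self->saddr = saddr;
	hashbin_insert(irlan, (QUEUE *) self, self->daddr, NULL); /* new key */
}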
+
+/*
+ * Function irlan_client_data_indication (handle, skb)
+ *
+ * This function gets the data that is received on the control channel
+ *
+ */
+static int irlan_client_ctrl_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ self = (struct irlan_cb *) instance;
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+ ASSERT(skb != NULL, return -1;);
+
+ irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb);
+
+ return 0;
+}
+
+static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *userdata)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ DEBUG(4, __FUNCTION__ "(), reason=%d\n", reason);
+
+ self = (struct irlan_cb *) instance;
+ tsap = (struct tsap_cb *) sap;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+ ASSERT(tsap != NULL, return;);
+ ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
+
+ ASSERT(tsap == self->client.tsap_ctrl, return;);
+
+ irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
+}
+
+/*
+ * Function irlan_client_open_tsaps (self)
+ *
+ * Initialize callbacks and open IrTTP TSAPs
+ *
+ */
+void irlan_client_open_ctrl_tsap(struct irlan_cb *self)
+{
+ struct notify_t notify;
+ struct tsap_cb *tsap;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if already open */
+ if (self->client.tsap_ctrl)
+ return;
+
+ irda_notify_init(&notify);
+
+ /* Set up callbacks */
+ notify.data_indication = irlan_client_ctrl_data_indication;
+ notify.connect_confirm = irlan_client_ctrl_connect_confirm;
+ notify.disconnect_indication = irlan_client_ctrl_disconnect_indication;
+ notify.instance = self;
+ strncpy(notify.name, "IrLAN ctrl (c)", NOTIFY_MAX_NAME);
+
+ tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
+ if (!tsap) {
+ DEBUG(2, __FUNCTION__ "(), Got no tsap!\n");
+ return;
+ }
+ self->client.tsap_ctrl = tsap;
+}
+
+/*
+ * Function irlan_client_ctrl_connect_confirm (handle, skb)
+ *
+ * Connection to peer IrLAN layer confirmed
+ *
+ */
+static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ self = (struct irlan_cb *) instance;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* TODO: we could set the MTU depending on the max_sdu_size */
+
+ irlan_do_client_event(self, IRLAN_CONNECT_COMPLETE, NULL);
+}
+
+/*
+ * Function irlan_client_reconnect_data_channel (self)
+ *
+ * Try to reconnect data channel (currently not used)
+ *
+ */
+void irlan_client_reconnect_data_channel(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = dev_alloc_skb(128);
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ frame[0] = CMD_RECONNECT_DATA_CHAN;
+ frame[1] = 0x01;
+ irlan_insert_array_param(skb, "RECONNECT_KEY",
+ self->client.reconnect_key,
+ self->client.key_len);
+
+ irttp_data_request(self->client.tsap_ctrl, skb);
+}
+
+/*
+ * Function irlan_client_parse_response (self, skb)
+ *
+ * Extract all parameters from received buffer, then feed them to
+ * check_params for parsing
+ */
+void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb)
+{
+ __u8 *frame;
+ __u8 *ptr;
+ int count;
+ int ret;
+ __u16 val_len;
+ int i;
+ char *name;
+ char *value;
+
+ ASSERT(skb != NULL, return;);
+
+ DEBUG(4, __FUNCTION__ "() skb->len=%d\n", (int) skb->len);
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ if (!skb) {
+ ERROR( __FUNCTION__ "(), Got NULL skb!\n");
+ return;
+ }
+ frame = skb->data;
+
+ /*
+ * Check return code and print it if not success
+ */
+ if (frame[0]) {
+ print_ret_code(frame[0]);
+ return;
+ }
+
+ name = kmalloc(255, GFP_ATOMIC);
+ if (!name)
+ return;
+ value = kmalloc(1016, GFP_ATOMIC);
+ if (!value) {
+ kfree(name);
+ return;
+ }
+
+ /* How many parameters? */
+ count = frame[1];
+
+ DEBUG(4, __FUNCTION__ "(), got %d parameters\n", count);
+
+ ptr = frame+2;
+
+ /* For all parameters */
+ for (i=0; i<count;i++) {
+ ret = irlan_extract_param(ptr, name, value, &val_len);
+ if (ret == -1) {
+ DEBUG(2, __FUNCTION__ "(), IrLAN, Error!\n");
+ break;
+ }
+ ptr+=ret;
+ irlan_check_response_param(self, name, value, val_len);
+ }
+ /* Cleanup */
+ kfree(name);
+ kfree(value);
+}
+
+/*
+ * Function irlan_check_response_param (self, param, value, val_len)
+ *
+ * Check which parameter is received and update local variables
+ *
+ */
+static void irlan_check_response_param(struct irlan_cb *self, char *param,
+ char *value, int val_len)
+{
+ __u16 tmp_cpu; /* Temporary value in host order */
+ __u8 *bytes;
+ int i;
+
+ DEBUG(4, __FUNCTION__ "(), parm=%s\n", param);
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /*
+ * Media type
+ */
+ if (strcmp(param, "MEDIA") == 0) {
+ if (strcmp(value, "802.3") == 0)
+ self->media = MEDIA_802_3;
+ else
+ self->media = MEDIA_802_5;
+ return;
+ }
+ if (strcmp(param, "FILTER_TYPE") == 0) {
+ if (strcmp(value, "DIRECTED") == 0)
+ self->client.filter_type |= IRLAN_DIRECTED;
+ else if (strcmp(value, "FUNCTIONAL") == 0)
+ self->client.filter_type |= IRLAN_FUNCTIONAL;
+ else if (strcmp(value, "GROUP") == 0)
+ self->client.filter_type |= IRLAN_GROUP;
+ else if (strcmp(value, "MAC_FRAME") == 0)
+ self->client.filter_type |= IRLAN_MAC_FRAME;
+ else if (strcmp(value, "MULTICAST") == 0)
+ self->client.filter_type |= IRLAN_MULTICAST;
+ else if (strcmp(value, "BROADCAST") == 0)
+ self->client.filter_type |= IRLAN_BROADCAST;
+ else if (strcmp(value, "IPX_SOCKET") == 0)
+ self->client.filter_type |= IRLAN_IPX_SOCKET;
+
+ }
+ if (strcmp(param, "ACCESS_TYPE") == 0) {
+ if (strcmp(value, "DIRECT") == 0)
+ self->access_type = ACCESS_DIRECT;
+ else if (strcmp(value, "PEER") == 0)
+ self->access_type = ACCESS_PEER;
+ else if (strcmp(value, "HOSTED") == 0)
+ self->access_type = ACCESS_HOSTED;
+ else {
+ DEBUG(2, __FUNCTION__ "(), unknown access type!\n");
+ }
+ }
+ /*
+ * IRLAN version
+ */
+ if (strcmp(param, "IRLAN_VER") == 0) {
+ DEBUG(4, "IrLAN version %d.%d\n", (__u8) value[0],
+ (__u8) value[1]);
+
+ self->version[0] = value[0];
+ self->version[1] = value[1];
+ return;
+ }
+ /*
+ * Which remote TSAP to use for data channel
+ */
+ if (strcmp(param, "DATA_CHAN") == 0) {
+ self->dtsap_sel_data = value[0];
+ DEBUG(4, "Data TSAP = %02x\n", self->dtsap_sel_data);
+ return;
+ }
+ if (strcmp(param, "CON_ARB") == 0) {
+ memcpy(&tmp_cpu, value, 2); /* Align value */
+ le16_to_cpus(&tmp_cpu); /* Convert to host order */
+ self->client.recv_arb_val = tmp_cpu;
+ DEBUG(2, __FUNCTION__ "(), receive arb val=%d\n",
+ self->client.recv_arb_val);
+ }
+ if (strcmp(param, "MAX_FRAME") == 0) {
+ memcpy(&tmp_cpu, value, 2); /* Align value */
+ le16_to_cpus(&tmp_cpu); /* Convert to host order */
+ self->client.max_frame = tmp_cpu;
+ DEBUG(4, __FUNCTION__ "(), max frame=%d\n",
+ self->client.max_frame);
+ }
+
+ /*
+ * RECONNECT_KEY, in case the link goes down!
+ */
+ if (strcmp(param, "RECONNECT_KEY") == 0) {
+ DEBUG(4, "Got reconnect key: ");
+ /* for (i = 0; i < val_len; i++) */
+/* printk("%02x", value[i]); */
+ memcpy(self->client.reconnect_key, value, val_len);
+ self->client.key_len = val_len;
+ DEBUG(4, "\n");
+ }
+ /*
+ * FILTER_ENTRY, have we got an ethernet address?
+ */
+ if (strcmp(param, "FILTER_ENTRY") == 0) {
+ bytes = value;
+ DEBUG(4, "Ethernet address = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
+ bytes[5]);
+ for (i = 0; i < 6; i++)
+ self->dev.dev_addr[i] = bytes[i];
+ }
+}
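
Note how the CON_ARB and MAX_FRAME branches above first memcpy() the two value bytes into a local __u16 and only then convert to host order: the value may land at an odd offset inside the received frame, and a direct 16-bit load would be an unaligned access on some CPUs. A minimal standalone sketch of the same alignment-safe little-endian read in plain C (get_le16 and parse_max_frame are illustrative names, not kernel helpers):

    #include <stdint.h>
    #include <string.h>

    /* Read a 16-bit little-endian value from an arbitrarily aligned buffer.
     * memcpy() avoids an unaligned load; assembling the bytes explicitly
     * avoids depending on the host's endianness. */
    static uint16_t get_le16(const uint8_t *p)
    {
            uint8_t b[2];

            memcpy(b, p, 2);                  /* safe for any alignment */
            return (uint16_t)(b[0] | (b[1] << 8));
    }

    /* Example: pull MAX_FRAME out of a parameter value located at an
     * odd offset within a received control frame. */
    static uint16_t parse_max_frame(const uint8_t *value)
    {
            return get_le16(value);           /* e.g. bytes EE 05 -> 1518 */
    }
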
+
+/*
+ * Function irlan_client_get_value_confirm (obj_id, value)
+ *
+ * Got results from remote LM-IAS
+ *
+ */
+void irlan_client_get_value_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
+{
+ struct irlan_cb *self;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(priv != NULL, return;);
+
+ self = (struct irlan_cb *) priv;
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ DEBUG(2, __FUNCTION__ "(), got NULL value!\n");
+ irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL,
+ NULL);
+ return;
+ }
+
+ switch (value->type) {
+ case IAS_INTEGER:
+ self->dtsap_sel_ctrl = value->t.integer;
+
+ if (value->t.integer != -1) {
+ irlan_do_client_event(self, IRLAN_IAS_PROVIDER_AVAIL,
+ NULL);
+ return;
+ }
+ break;
+ case IAS_STRING:
+ DEBUG(2, __FUNCTION__ "(), got string %s\n", value->t.string);
+ break;
+ case IAS_OCT_SEQ:
+ DEBUG(2, __FUNCTION__ "(), OCT_SEQ not implemented\n");
+ break;
+ case IAS_MISSING:
+ DEBUG(2, __FUNCTION__ "(), MISSING not implemented\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), unknown type!\n");
+ break;
+ }
+ irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL);
+}
diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c
new file mode 100644
index 000000000..1544c093e
--- /dev/null
+++ b/net/irda/irlan/irlan_client_event.c
@@ -0,0 +1,527 @@
+/*********************************************************************
+ *
+ * Filename: irlan_client_event.c
+ * Version: 0.9
+ * Description: IrLAN client state machine
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Thu Apr 22 12:23:22 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/timer.h>
+#include <net/irda/irmod.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irttp.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_client.h>
+#include <net/irda/irlan_event.h>
+
+static int irlan_client_state_idle (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_conn (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_info (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_open (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_wait (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_arb (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_data (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_client_state_sync (struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+static int (*state[])(struct irlan_cb *, IRLAN_EVENT event, struct sk_buff *) =
+{
+ irlan_client_state_idle,
+ irlan_client_state_query,
+ irlan_client_state_conn,
+ irlan_client_state_info,
+ irlan_client_state_media,
+ irlan_client_state_open,
+ irlan_client_state_wait,
+ irlan_client_state_arb,
+ irlan_client_state_data,
+ irlan_client_state_close,
+ irlan_client_state_sync
+};
+
+void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ (*state[ self->client.state]) (self, event, skb);
+}
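
irlan_do_client_event() is a textbook table-driven state machine: the current client state indexes the state[] array of handlers, and each handler consumes one event and optionally advances the state. A compact, self-contained sketch of the same dispatch pattern with made-up states and events (not the real IRLAN_* values):

    #include <stdio.h>

    enum state { ST_IDLE, ST_QUERY, ST_DATA, ST_MAX };
    enum event { EV_DISCOVERY, EV_CONNECTED, EV_DISCONNECT };

    struct machine { enum state state; };

    typedef void (*handler_t)(struct machine *, enum event);

    static void st_idle(struct machine *m, enum event ev)
    {
            if (ev == EV_DISCOVERY)
                    m->state = ST_QUERY;
    }

    static void st_query(struct machine *m, enum event ev)
    {
            if (ev == EV_CONNECTED)
                    m->state = ST_DATA;
            else if (ev == EV_DISCONNECT)
                    m->state = ST_IDLE;
    }

    static void st_data(struct machine *m, enum event ev)
    {
            if (ev == EV_DISCONNECT)
                    m->state = ST_IDLE;
    }

    /* One handler per state, indexed exactly like state[] above. */
    static const handler_t handlers[ST_MAX] = { st_idle, st_query, st_data };

    static void do_event(struct machine *m, enum event ev)
    {
            handlers[m->state](m, ev);
    }

    int main(void)
    {
            struct machine m = { ST_IDLE };

            do_event(&m, EV_DISCOVERY);
            do_event(&m, EV_CONNECTED);
            printf("state=%d\n", m.state);    /* prints 2 (ST_DATA) */
            return 0;
    }

do_event() is the analogue of (*state[self->client.state])(self, event, skb); adding a state means adding one handler and one table entry while the dispatch itself stays unchanged.
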
+
+/*
+ * Function irlan_client_state_idle (event, skb, info)
+ *
+ * IDLE, We are waiting for an indication that there is a provider
+ * available.
+ */
+static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch(event) {
+ case IRLAN_DISCOVERY_INDICATION:
+ /* Get some values from peer IAS */
+ iriap_getvaluebyclass_request(
+ "IrLAN", "IrDA:TinyTP:LsapSel",
+ self->saddr, self->daddr,
+ irlan_client_get_value_confirm, self);
+
+ irlan_next_client_state(self, IRLAN_QUERY);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_query (event, skb, info)
+ *
+ * QUERY, We have queried the remote IAS and are ready to connect
+ * to the provider, just waiting for the confirm.
+ *
+ */
+static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch(event) {
+ case IRLAN_IAS_PROVIDER_AVAIL:
+ ASSERT(self->dtsap_sel_ctrl != 0, return -1;);
+
+ self->client.open_retries = 0;
+
+ irttp_connect_request(self->client.tsap_ctrl,
+ self->dtsap_sel_ctrl,
+ self->saddr, self->daddr, NULL,
+ IRLAN_MTU, NULL);
+ irlan_next_client_state(self, IRLAN_CONN);
+ break;
+ case IRLAN_IAS_PROVIDER_NOT_AVAIL:
+ DEBUG(2, __FUNCTION__ "(), IAS_PROVIDER_NOT_AVAIL\n");
+ irlan_next_client_state(self, IRLAN_IDLE);
+
+ /* Give the client a kick! */
+ if ((self->access_type == ACCESS_PEER) &&
+ (self->provider.state != IRLAN_IDLE))
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__"(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_conn (event, skb, info)
+ *
+ * CONN, We have connected to a provider but have not issued any
+ * commands yet.
+ *
+ */
+static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_CONNECT_COMPLETE:
+ /* Send getinfo cmd */
+ irlan_get_provider_info(self);
+ irlan_next_client_state(self, IRLAN_INFO);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_info (self, event, skb, info)
+ *
+ * INFO, We have issued a GetInfo command and are awaiting a reply.
+ */
+static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_DATA_INDICATION:
+ ASSERT(skb != NULL, return -1;);
+
+ irlan_client_parse_response(self, skb);
+
+ irlan_next_client_state(self, IRLAN_MEDIA);
+
+ irlan_get_media_char(self);
+ break;
+
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_media (self, event, skb, info)
+ *
+ * MEDIA, The irlan_client has issued a GetMedia command and is awaiting a
+ * reply.
+ *
+ */
+static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_DATA_INDICATION:
+ irlan_client_parse_response(self, skb);
+ irlan_open_data_channel(self);
+ irlan_next_client_state(self, IRLAN_OPEN);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_open (self, event, skb, info)
+ *
+ * OPEN, The irlan_client has issued an OpenData command and is awaiting a
+ * reply
+ *
+ */
+static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ struct qos_info qos;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_DATA_INDICATION:
+ irlan_client_parse_response(self, skb);
+
+ /*
+ * Check if we have got the remote TSAP for data
+ * communications
+ */
+ ASSERT(self->dtsap_sel_data != 0, return -1;);
+
+ /* Check which access type we are dealing with */
+ switch(self->access_type) {
+ case ACCESS_PEER:
+ if (self->provider.state == IRLAN_OPEN) {
+
+ irlan_next_client_state(self, IRLAN_ARB);
+ irlan_do_client_event(self, IRLAN_CHECK_CON_ARB,
+ NULL);
+ } else {
+
+ irlan_next_client_state(self, IRLAN_WAIT);
+ }
+ break;
+ case ACCESS_DIRECT:
+ case ACCESS_HOSTED:
+ qos.link_disc_time.bits = 0x01; /* 3 secs */
+
+ irttp_connect_request(self->tsap_data,
+ self->dtsap_sel_data,
+ self->saddr, self->daddr, &qos,
+ IRLAN_MTU, NULL);
+
+ irlan_next_client_state(self, IRLAN_DATA);
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), unknown access type!\n");
+ break;
+ }
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_wait (self, event, skb, info)
+ *
+ * WAIT, The irlan_client is waiting for the local provider to enter the
+ * provider OPEN state.
+ *
+ */
+static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_PROVIDER_SIGNAL:
+ irlan_next_client_state(self, IRLAN_ARB);
+ irlan_do_client_event(self, IRLAN_CHECK_CON_ARB, NULL);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ struct qos_info qos;
+
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_CHECK_CON_ARB:
+ if (self->client.recv_arb_val == self->provider.send_arb_val) {
+ irlan_next_client_state(self, IRLAN_CLOSE);
+ irlan_close_data_channel(self);
+ } else if (self->client.recv_arb_val <
+ self->provider.send_arb_val)
+ {
+ qos.link_disc_time.bits = 0x01; /* 3 secs */
+
+ irlan_next_client_state(self, IRLAN_DATA);
+ irttp_connect_request(self->tsap_data,
+ self->dtsap_sel_data,
+ self->saddr, self->daddr, &qos,
+ IRLAN_MTU, NULL);
+ } else if (self->client.recv_arb_val >
+ self->provider.send_arb_val)
+ {
+ DEBUG(2, __FUNCTION__ "(), lost the battle :-(\n");
+ }
+ break;
+ case IRLAN_DATA_CONNECT_INDICATION:
+ irlan_next_client_state(self, IRLAN_DATA);
+ break;
+ case IRLAN_LMP_DISCONNECT:
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ case IRLAN_WATCHDOG_TIMEOUT:
+ DEBUG(2, __FUNCTION__ "(), IRLAN_WATCHDOG_TIMEOUT\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
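
The ARB state resolves the peer-to-peer race by comparing the arbitration value received from the peer (recv_arb_val) with the one this side sent (send_arb_val): equal values close the data channel, a lower received value means this side issues the data connect, and a higher received value means the other side will connect. A tiny sketch of that three-way decision as a pure function (arbitrate and the enum names are illustrative):

    /* Outcome of the connection-arbitration comparison, as the state
     * handler above reads it: compare the value received from the peer
     * with the value this side sent. */
    enum arb_outcome { ARB_CLOSE, ARB_CONNECT, ARB_WAIT };

    static enum arb_outcome arbitrate(unsigned recv_val, unsigned send_val)
    {
            if (recv_val == send_val)
                    return ARB_CLOSE;    /* tie: close the data channel      */
            if (recv_val < send_val)
                    return ARB_CONNECT;  /* we won: issue the data connect   */
            return ARB_WAIT;             /* we lost: wait for peer's connect */
    }

For instance, arbitrate(3, 7) returns ARB_CONNECT, matching the branch above that calls irttp_connect_request().
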
+
+/*
+ * Function irlan_client_state_data (self, event, skb, info)
+ *
+ * DATA, The data channel is connected, allowing data transfers between
+ * the local and remote machines.
+ *
+ */
+static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch(event) {
+ case IRLAN_DATA_INDICATION:
+ irlan_client_parse_response(self, skb);
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_client_state(self, IRLAN_IDLE);
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_close (self, event, skb, info)
+ *
+ *
+ *
+ */
+static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_client_state_sync (self, event, skb, info)
+ *
+ *
+ *
+ */
+static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 90be583f5..6a30574ca 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irlan_common.c
- * Version: 0.1
+ * Version: 0.9
* Description: IrDA LAN Access Protocol Implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Tue Jan 19 23:11:30 1999
+ * Modified at: Thu Apr 22 23:13:47 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
@@ -44,89 +44,272 @@
#include <net/irda/timer.h>
#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_client.h>
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_eth.h>
+#include <net/irda/irlan_filter.h>
-static void __irlan_close( struct irlan_cb *self);
+/* extern char sysctl_devname[]; */
/*
* Master structure
*/
hashbin_t *irlan = NULL;
+static __u32 ckey, skey;
+
+/* Module parameters */
+static int eth = 0; /* Use "eth" or "irlan" name for devices */
+static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */
+static int timeout = IRLAN_TIMEOUT;
+
+static char *irlan_state[] = {
+ "IRLAN_IDLE",
+ "IRLAN_QUERY",
+ "IRLAN_CONN",
+ "IRLAN_INFO",
+ "IRLAN_MEDIA",
+ "IRLAN_OPEN",
+ "IRLAN_WAIT",
+ "IRLAN_ARB",
+ "IRLAN_DATA",
+ "IRLAN_CLOSE",
+ "IRLAN_SYNC"
+};
-#ifdef CONFIG_PROC_FS
-static int irlan_proc_read( char *buf, char **start, off_t offset,
- int len, int unused);
-
-extern struct proc_dir_entry proc_irda;
+static char *irlan_access[] = {
+ "UNKNOWN",
+ "DIRECT",
+ "PEER",
+ "HOSTED"
+};
-struct proc_dir_entry proc_irlan = {
- 0, 5, "irlan",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL,
- &irlan_proc_read,
+static char *irlan_media[] = {
+ "UNKNOWN",
+ "802.3",
+ "802.5"
};
+
+static void __irlan_close(struct irlan_cb *self);
+static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
+ __u8 value_byte, __u16 value_short,
+ __u8 *value_array, __u16 value_len);
+static void irlan_close_tsaps(struct irlan_cb *self);
+
+#ifdef CONFIG_PROC_FS
+static int irlan_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused);
+
+extern struct proc_dir_entry *proc_irda;
#endif
+void irlan_watchdog_timer_expired(unsigned long data)
+{
+ struct irmanager_event mgr_event;
+ struct irlan_cb *self, *entry;
+
+ DEBUG(0, __FUNCTION__ "()\n");
+
+ self = (struct irlan_cb *) data;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if device still configured */
+ if (self->dev.start) {
+ mgr_event.event = EVENT_IRLAN_STOP;
+ sprintf(mgr_event.devname, "%s", self->ifname);
+ irmanager_notify(&mgr_event);
+
+ /*
+ * We set this to false, so that irlan_dev_close knows that
+ * notify_irmanager should actually be set to TRUE again
+ * instead of FALSE, since this close has not been initiated
+ * by the user.
+ */
+ self->notify_irmanager = FALSE;
+ } else {
+ DEBUG(0, __FUNCTION__ "(), recycling instance!\n");
+ if (self->netdev_registered) {
+ DEBUG(0, __FUNCTION__ "(), removing netdev!\n");
+ unregister_netdev(&self->dev);
+ self->netdev_registered = FALSE;
+ }
+
+ /* Unbind from daddr */
+ entry = hashbin_remove(irlan, self->daddr, NULL);
+ ASSERT(entry == self, return;);
+
+ self->daddr = DEV_ADDR_ANY;
+ self->saddr = DEV_ADDR_ANY;
+
+ DEBUG(2, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
+ hashbin_insert(irlan, (QUEUE*) self, self->daddr, NULL);
+ }
+}
+
+/*
+ * Function irlan_start_watchdog_timer (self, timeout)
+ *
+ *
+ *
+ */
+void irlan_start_watchdog_timer(struct irlan_cb *self, int timeout)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ irda_start_timer(&self->watchdog_timer, timeout, (unsigned long) self,
+ irlan_watchdog_timer_expired);
+}
+
/*
* Function irlan_init (void)
*
* Initialize IrLAN layer
*
*/
-__initfunc(int irlan_init( void))
+__initfunc(int irlan_init(void))
{
+ struct irlan_cb *new;
+ __u16 hints;
+
+ DEBUG(4, __FUNCTION__"()\n");
+
/* Allocate master array */
- irlan = hashbin_new( HB_LOCAL);
- if ( irlan == NULL) {
- printk( KERN_WARNING "IrLAN: Can't allocate hashbin!\n");
+ irlan = hashbin_new(HB_LOCAL);
+ if (irlan == NULL) {
+ printk(KERN_WARNING "IrLAN: Can't allocate hashbin!\n");
return -ENOMEM;
}
#ifdef CONFIG_PROC_FS
- proc_register( &proc_irda, &proc_irlan);
+ create_proc_entry("irlan", 0, proc_irda)->get_info = irlan_proc_read;
#endif /* CONFIG_PROC_FS */
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ hints = irlmp_service_to_hint(S_LAN);
+
+ /* Register with IrLMP as a client */
+ ckey = irlmp_register_client(hints, irlan_client_discovery_indication,
+ NULL);
+
+ /* Register with IrLMP as a service */
+ skey = irlmp_register_service(hints);
+
+ /* Start the first IrLAN instance */
+ new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY, FALSE);
+
+ irlan_open_data_tsap(new);
+ irlan_client_open_ctrl_tsap(new);
+ irlan_provider_open_ctrl_tsap(new);
+
+ /* Do some fast discovery! */
+ irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
+
return 0;
}
void irlan_cleanup(void)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ irlmp_unregister_client(ckey);
+
+ irlmp_unregister_service(skey);
#ifdef CONFIG_PROC_FS
- proc_unregister( &proc_irda, proc_irlan.low_ino);
-#endif
+ remove_proc_entry("irlan", proc_irda);
+#endif /* CONFIG_PROC_FS */
/*
* Delete hashbin and close all irlan client instances in it
*/
- hashbin_delete( irlan, (FREE_FUNC) __irlan_close);
+ hashbin_delete(irlan, (FREE_FUNC) __irlan_close);
}
+/*
+ * Function irlan_register_netdev (self)
+ *
+ * Registers the network device to be used. We should not register until
+ * we have been bound to a particular provider or client.
+ */
+int irlan_register_netdev(struct irlan_cb *self)
+{
+ int i=0;
+
+ DEBUG(0, __FUNCTION__ "()\n");
+
+ /* Check if we should call the device eth<x> or irlan<x> */
+ if (!eth) {
+ /* Get the first free irlan<x> name */
+ do {
+ sprintf(self->ifname, "%s%d", "irlan", i++);
+ } while (dev_get(self->ifname) != NULL);
+ }
+ self->dev.name = self->ifname;
+
+ if (register_netdev(&self->dev) != 0) {
+ DEBUG(2, __FUNCTION__ "(), register_netdev() failed!\n");
+ return -1;
+ }
+ self->netdev_registered = TRUE;
+
+ return 0;
+}
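
The naming loop in irlan_register_netdev() probes irlan0, irlan1, ... until dev_get() no longer finds a device with that name. The same first-free-name pattern, reduced to standard C with a stub lookup standing in for dev_get() (name_in_use and pick_ifname are illustrative names):

    #include <stdio.h>
    #include <string.h>

    /* Stub standing in for dev_get(): pretend irlan0 and irlan1 exist. */
    static int name_in_use(const char *name)
    {
            return strcmp(name, "irlan0") == 0 || strcmp(name, "irlan1") == 0;
    }

    /* Pick the first free irlan<N> name, exactly like the loop above. */
    static void pick_ifname(char *buf, size_t buflen)
    {
            int i = 0;

            do {
                    snprintf(buf, buflen, "irlan%d", i++);
            } while (name_in_use(buf));
    }

    int main(void)
    {
            char ifname[16];

            pick_ifname(ifname, sizeof(ifname));
            printf("%s\n", ifname);   /* prints "irlan2" with the stub above */
            return 0;
    }
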
/*
* Function irlan_open (void)
*
- *
- *
+ * Open a new instance of a client/provider; we should only register the
+ * network device if this instance is meant for a particular client/provider
*/
-struct irlan_cb *irlan_open(void)
+struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr, int netdev)
{
struct irlan_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
/*
* Initialize the irlan structure.
*/
- self = kmalloc( sizeof(struct irlan_cb), GFP_ATOMIC);
- if ( self == NULL)
+ self = kmalloc(sizeof(struct irlan_cb), GFP_ATOMIC);
+ if (self == NULL)
return NULL;
- memset( self, 0, sizeof( struct irlan_cb));
+ memset(self, 0, sizeof(struct irlan_cb));
/*
* Initialize local device structure
*/
self->magic = IRLAN_MAGIC;
+ ASSERT(irlan != NULL, return NULL;);
+
+ sprintf(self->ifname, "%s", "unknown");
+
+ self->dev.priv = (void *) self;
+ self->dev.next = NULL;
+ self->dev.init = irlan_eth_init;
+
+ self->saddr = saddr;
+ self->daddr = daddr;
+
+ /* Provider access can only be PEER, DIRECT, or HOSTED */
+ self->access_type = access;
+ self->media = MEDIA_802_3;
+
+ self->notify_irmanager = TRUE;
+
+ init_timer(&self->watchdog_timer);
+ init_timer(&self->client.kick_timer);
+
+ hashbin_insert(irlan, (QUEUE *) self, daddr, NULL);
+
+ irlan_next_client_state(self, IRLAN_IDLE);
+ irlan_next_provider_state(self, IRLAN_IDLE);
+
+ /* Register network device now, or wait until some later time? */
+ if (netdev)
+ irlan_register_netdev(self);
+
return self;
}
/*
@@ -137,63 +320,283 @@ struct irlan_cb *irlan_open(void)
* hashbin_remove() first!!!
*
*/
-void __irlan_close( struct irlan_cb *self)
+static void __irlan_close(struct irlan_cb *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(0, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- /*
- * Disconnect open TSAP connections
- */
- if ( self->tsap_data) {
- irttp_disconnect_request( self->tsap_data, NULL, P_HIGH);
-
- /* FIXME: this will close the tsap before the disconenct
- * frame has been sent
- */
- /* irttp_close_tsap( self->tsap_data); */
- }
- if ( self->tsap_ctrl) {
- irttp_disconnect_request( self->tsap_ctrl, NULL, P_HIGH);
-
- /* irttp_close_tsap( self->tsap_control); */
+ del_timer(&self->watchdog_timer);
+ del_timer(&self->client.kick_timer);
+
+ /* Close all open connections and remove TSAPs */
+ irlan_close_tsaps(self);
+
+ if (self->netdev_registered) {
+ unregister_netdev(&self->dev);
+ self->netdev_registered = FALSE;
}
- unregister_netdev( &self->dev);
-
- /*
- * Make sure that nobody uses this instance anymore!
- */
self->magic = 0;
-
- /*
- * Dealloacte structure
- */
- kfree( self);
+ kfree(self);
}
/*
* Function irlan_close (self)
*
- *
+ * Close instance
*
*/
-void irlan_close( struct irlan_cb *self)
+void irlan_close(struct irlan_cb *self)
{
struct irlan_cb *entry;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(0, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if device is still configured */
+ if (self->dev.start) {
+ DEBUG(2, __FUNCTION__
+ "(), Device still configured, closing later!\n");
+ return;
+ }
+ DEBUG(2, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
+ entry = hashbin_remove(irlan, self->daddr, NULL);
+
+ ASSERT(entry == self, return;);
+
+ __irlan_close(self);
+}
+
+void irlan_connect_indication(void *instance, void *sap, struct qos_info *qos,
+ __u32 max_sdu_size, struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ self = (struct irlan_cb *) instance;
+ tsap = (struct tsap_cb *) sap;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+ ASSERT(tsap == self->tsap_data,return;);
+
+ DEBUG(2, "IrLAN, We are now connected!\n");
+ del_timer(&self->watchdog_timer);
+
+ irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
+ irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
+
+ if (self->access_type == ACCESS_PEER) {
+ /*
+ * Data channel is open, so we are now allowed to
+ * configure the remote filter
+ */
+ irlan_get_unicast_addr(self);
+ irlan_open_unicast_addr(self);
+ }
+ /* Ready to transfer Ethernet frames */
+ self->dev.tbusy = 0;
+}
+
+void irlan_connect_confirm(void *instance, void *sap, struct qos_info *qos,
+ __u32 max_sdu_size, struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ self = (struct irlan_cb *) instance;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* TODO: we could set the MTU depending on the max_sdu_size */
+
+ DEBUG(2, "IrLAN, We are now connected!\n");
+ del_timer(&self->watchdog_timer);
+
+ /*
+ * Data channel is open, so we are now allowed to configure the remote
+ * filter
+ */
+ irlan_get_unicast_addr(self);
+ irlan_open_unicast_addr(self);
+
+ /* Ready to transfer Ethernet frames */
+ self->dev.tbusy = 0;
+}
+
+/*
+ * Function irlan_client_disconnect_indication (handle)
+ *
+ * Callback function for the IrTTP layer. Indicates a disconnection of
+ * the specified connection (handle)
+ */
+void irlan_disconnect_indication(void *instance, void *sap, LM_REASON reason,
+ struct sk_buff *userdata)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ DEBUG(2, __FUNCTION__ "(), reason=%d\n", reason);
+
+ self = (struct irlan_cb *) instance;
+ tsap = (struct tsap_cb *) sap;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+ ASSERT(tsap != NULL, return;);
+ ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
+
+ ASSERT(tsap == self->tsap_data, return;);
+
+ DEBUG(2, "IrLAN, data channel disconnected by peer!\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ switch(reason) {
+ case LM_USER_REQUEST: /* User request */
+ //irlan_close(self);
+ break;
+ case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */
+ irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
+ break;
+ case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */
+ DEBUG(2, __FUNCTION__ "(), LM_CONNECT_FAILURE not impl\n");
+ break;
+ case LM_LAP_RESET: /* IrLAP reset */
+ DEBUG(2, __FUNCTION__ "(), LM_LAP_RESET not impl\n");
+ break;
+ case LM_INIT_DISCONNECT:
+ DEBUG(2, __FUNCTION__ "(), LM_INIT_DISCONNECT not impl\n");
+ break;
+ default:
+ break;
+ }
- entry = hashbin_remove( irlan, self->daddr, NULL);
+ /* Stop IP from transmitting more packets */
+ /* irlan_client_flow_indication(handle, FLOW_STOP, priv); */
+
+ irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
+ irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
+}
+
+void irlan_open_data_tsap(struct irlan_cb *self)
+{
+ struct notify_t notify;
+ struct tsap_cb *tsap;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Check if already open */
+ if (self->tsap_data)
+ return;
+
+ irda_notify_init(&notify);
+
+ notify.data_indication = irlan_eth_receive;
+ notify.udata_indication = irlan_eth_receive;
+ notify.connect_indication = irlan_connect_indication;
+ notify.connect_confirm = irlan_connect_confirm;
+ notify.flow_indication = irlan_eth_flow_indication;
+ notify.disconnect_indication = irlan_disconnect_indication;
+ notify.instance = self;
+ strncpy(notify.name, "IrLAN data", NOTIFY_MAX_NAME);
+
+ tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
+ if (!tsap) {
+ DEBUG(2, __FUNCTION__ "(), Got no tsap!\n");
+ return;
+ }
+ self->tsap_data = tsap;
+
+ /*
+ * This is the data TSAP selector which we will pass to the client
+ * when the client ask for it.
+ */
+ self->stsap_sel_data = self->tsap_data->stsap_sel;
+}
+
+void irlan_close_tsaps(struct irlan_cb *self)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /*
+ * Disconnect and close all open TSAP connections
+ */
+ if (self->tsap_data) {
+ irttp_disconnect_request(self->tsap_data, NULL, P_NORMAL);
+ irttp_close_tsap(self->tsap_data);
+ self->tsap_data = NULL;
+
+ }
+ if (self->client.tsap_ctrl) {
+ irttp_disconnect_request(self->client.tsap_ctrl, NULL,
+ P_NORMAL);
+ irttp_close_tsap(self->client.tsap_ctrl);
+ self->client.tsap_ctrl = NULL;
+ }
+ if (self->provider.tsap_ctrl) {
+ irttp_disconnect_request(self->provider.tsap_ctrl, NULL,
+ P_NORMAL);
+ irttp_close_tsap(self->provider.tsap_ctrl);
+ self->provider.tsap_ctrl = NULL;
+ }
+}
+
+/*
+ * Function irlan_ias_register (self, tsap_sel)
+ *
+ * Register with LM-IAS
+ *
+ */
+void irlan_ias_register(struct irlan_cb *self, __u8 tsap_sel)
+{
+ struct ias_object *obj;
+ struct ias_value *new_value;
- ASSERT( entry == self, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /*
+ * Check if object has already been registered by a previous provider.
+ * If that is the case, we just change the value of the attribute
+ */
+ if (!irias_find_object("IrLAN")) {
+ obj = irias_new_object("IrLAN", IAS_IRLAN_ID);
+ irias_add_integer_attrib(obj, "IrDA:TinyTP:LsapSel", tsap_sel);
+ irias_insert_object(obj);
+ } else {
+ new_value = irias_new_integer_value(tsap_sel);
+ irias_object_change_attribute("IrLAN", "IrDA:TinyTP:LsapSel",
+ new_value);
+ }
- __irlan_close( self);
+ /* Register PnP object only if not registered before */
+ if (!irias_find_object("PnP")) {
+ obj = irias_new_object("PnP", IAS_PNP_ID);
+#if 0
+ irias_add_string_attrib(obj, "Name", sysctl_devname);
+#else
+ irias_add_string_attrib(obj, "Name", "Linux");
+#endif
+ irias_add_string_attrib(obj, "DeviceID", "HWP19F0");
+ irias_add_integer_attrib(obj, "CompCnt", 2);
+ irias_add_string_attrib(obj, "Comp#01", "PNP8294");
+ irias_add_string_attrib(obj, "Comp#02", "PNP8389");
+ irias_add_string_attrib(obj, "Manufacturer", "Linux-IrDA Project");
+ irias_insert_object(obj);
+ }
}
/*
@@ -202,33 +605,30 @@ void irlan_close( struct irlan_cb *self)
* Send Get Provider Information command to peer IrLAN layer
*
*/
-void irlan_get_provider_info( struct irlan_cb *self)
+void irlan_get_provider_info(struct irlan_cb *self)
{
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate an skb of length %d\n", 64);
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
frame = skb->data;
frame[0] = CMD_GET_PROVIDER_INFO;
frame[1] = 0x00; /* Zero parameters */
- irttp_data_request( self->tsap_ctrl, skb);
+ irttp_data_request(self->client.tsap_ctrl, skb);
}
/*
@@ -237,26 +637,22 @@ void irlan_get_provider_info( struct irlan_cb *self)
* Send an Open Data Command to provider
*
*/
-void irlan_open_data_channel( struct irlan_cb *self)
+void irlan_open_data_channel(struct irlan_cb *self)
{
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate an skb of length %d\n", 64);
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
- /* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
frame = skb->data;
@@ -264,13 +660,41 @@ void irlan_open_data_channel( struct irlan_cb *self)
frame[0] = CMD_OPEN_DATA_CHANNEL;
frame[1] = 0x02; /* Two parameters */
- insert_string_param( skb, "MEDIA", "802.3");
- insert_string_param( skb, "ACCESS_TYPE", "DIRECT");
- /* insert_string_param( skb, "MODE", "UNRELIABLE"); */
+ irlan_insert_string_param(skb, "MEDIA", "802.3");
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT");
+ /* irlan_insert_string_param(skb, "MODE", "UNRELIABLE"); */
/* self->use_udata = TRUE; */
- irttp_data_request( self->tsap_ctrl, skb);
+ irttp_data_request(self->client.tsap_ctrl, skb);
+}
+
+void irlan_close_data_channel(struct irlan_cb *self)
+{
+ struct sk_buff *skb;
+ __u8 *frame;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = dev_alloc_skb(64);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
+
+ frame = skb->data;
+
+ /* Build frame */
+ frame[0] = CMD_CLOSE_DATA_CHAN;
+ frame[1] = 0x01; /* One parameter */
+
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+
+ irttp_data_request(self->client.tsap_ctrl, skb);
}
/*
@@ -280,84 +704,74 @@ void irlan_open_data_channel( struct irlan_cb *self)
* address.
*
*/
-void irlan_open_unicast_addr( struct irlan_cb *self)
+void irlan_open_unicast_addr(struct irlan_cb *self)
{
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb( 128);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate an skb of length %d\n", 64);
+ skb = dev_alloc_skb(128);
+ if (!skb)
return;
- }
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
frame = skb->data;
frame[0] = CMD_FILTER_OPERATION;
frame[1] = 0x03; /* Three parameters */
- insert_byte_param( skb, "DATA_CHAN" , self->dtsap_sel_data);
- insert_string_param( skb, "FILTER_TYPE", "DIRECTED");
- insert_string_param( skb, "FILTER_MODE", "FILTER");
+ irlan_insert_byte_param(skb, "DATA_CHAN" , self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
+ irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
- irttp_data_request( self->tsap_ctrl, skb);
+ irttp_data_request(self->client.tsap_ctrl, skb);
}
/*
* Function irlan_set_broadcast_filter (self, status)
*
* Make IrLAN provider accept ethernet frames addressed to the broadcast
- * address. Be careful with the use of this one, sice there may be a lot
+ * address. Be careful with the use of this one, since there may be a lot
* of broadcast traffic out there. We can still function without this
* one but then _we_ have to initiate all communication with other
- * hosts, sice ARP request for this host will not be answered.
+ * hosts, since ARP requests for this host will not be answered.
*/
-void irlan_set_broadcast_filter( struct irlan_cb *self, int status)
+void irlan_set_broadcast_filter(struct irlan_cb *self, int status)
{
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- /* Should only be used by client */
- if (!self->client)
+ skb = dev_alloc_skb(128);
+ if (!skb)
return;
- skb = dev_alloc_skb( 128);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate an skb of length %d\n", 64);
- return;
- }
-
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
frame = skb->data;
frame[0] = CMD_FILTER_OPERATION;
frame[1] = 0x03; /* Three parameters */
- insert_byte_param( skb, "DATA_CHAN", self->dtsap_sel_data);
- insert_string_param( skb, "FILTER_TYPE", "BROADCAST");
- if ( status)
- insert_string_param( skb, "FILTER_MODE", "FILTER");
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
+ if (status)
+ irlan_insert_string_param(skb, "FILTER_MODE", "FILTER");
else
- insert_string_param( skb, "FILTER_MODE", "NONE");
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
- irttp_data_request( self->tsap_ctrl, skb);
+ irttp_data_request(self->client.tsap_ctrl, skb);
}
/*
@@ -367,43 +781,36 @@ void irlan_set_broadcast_filter( struct irlan_cb *self, int status)
* address.
*
*/
-void irlan_set_multicast_filter( struct irlan_cb *self, int status)
+void irlan_set_multicast_filter(struct irlan_cb *self, int status)
{
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- /* Should only be used by client */
- if (!self->client)
+ skb = dev_alloc_skb(128);
+ if (!skb)
return;
- skb = dev_alloc_skb( 128);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate an skb of length %d\n", 64);
- return;
- }
-
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
frame = skb->data;
frame[0] = CMD_FILTER_OPERATION;
frame[1] = 0x03; /* Three parameters */
- insert_byte_param( skb, "DATA_CHAN", self->dtsap_sel_data);
- insert_string_param( skb, "FILTER_TYPE", "MULTICAST");
- if ( status)
- insert_string_param( skb, "FILTER_MODE", "ALL");
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");
+ if (status)
+ irlan_insert_string_param(skb, "FILTER_MODE", "ALL");
else
- insert_string_param( skb, "FILTER_MODE", "NONE");
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
- irttp_data_request( self->tsap_ctrl, skb);
+ irttp_data_request(self->client.tsap_ctrl, skb);
}
/*
@@ -414,36 +821,33 @@ void irlan_set_multicast_filter( struct irlan_cb *self, int status)
* can construct its packets.
*
*/
-void irlan_get_unicast_addr( struct irlan_cb *self)
+void irlan_get_unicast_addr(struct irlan_cb *self)
{
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb( 128);
- if (skb == NULL) {
- DEBUG( 0, "irlan_client_get_unicast_addr: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ skb = dev_alloc_skb(128);
+ if (!skb)
return;
- }
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
frame = skb->data;
frame[0] = CMD_FILTER_OPERATION;
frame[1] = 0x03; /* Three parameters */
- insert_byte_param( skb, "DATA_CHAN", self->dtsap_sel_data);
- insert_string_param( skb, "FILTER_TYPE", "DIRECTED");
- insert_string_param( skb, "FILTER_OPERATION", "DYNAMIC");
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->dtsap_sel_data);
+ irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
+ irlan_insert_string_param(skb, "FILTER_OPERATION", "DYNAMIC");
- irttp_data_request( self->tsap_ctrl, skb);
+ irttp_data_request(self->client.tsap_ctrl, skb);
}
/*
@@ -452,26 +856,23 @@ void irlan_get_unicast_addr( struct irlan_cb *self)
*
*
*/
-void irlan_get_media_char( struct irlan_cb *self)
+void irlan_get_media_char(struct irlan_cb *self)
{
struct sk_buff *skb;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0,"irlan_server_get_media_char: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
/* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
frame = skb->data;
@@ -479,9 +880,9 @@ void irlan_get_media_char( struct irlan_cb *self)
frame[0] = CMD_GET_MEDIA_CHAR;
frame[1] = 0x01; /* One parameter */
- insert_string_param( skb, "MEDIA", "802.3");
+ irlan_insert_string_param(skb, "MEDIA", "802.3");
- irttp_data_request( self->tsap_ctrl, skb);
+ irttp_data_request(self->client.tsap_ctrl, skb);
}
/*
@@ -490,47 +891,14 @@ void irlan_get_media_char( struct irlan_cb *self)
* Insert byte parameter into frame
*
*/
-int insert_byte_param( struct sk_buff *skb, char *param, __u8 value)
+int irlan_insert_byte_param(struct sk_buff *skb, char *param, __u8 value)
{
- __u8 *frame;
- __u8 len_param;
- __u16 len_value;
- int n=0;
-
- if ( skb == NULL) {
- DEBUG( 0, "insert_param: Got NULL skb\n");
- return 0;
- }
-
- len_param = strlen( param);
- len_value = 1;
-
- /*
- * Insert at end of sk-buffer
- */
- frame = skb->tail;
-
- /* Make space for data */
- if ( skb_tailroom(skb) < (len_param+len_value+3)) {
- DEBUG( 0, "insert_param: No more space at end of skb\n");
- return 0;
- }
- skb_put( skb, len_param+len_value+3);
-
- /* Insert parameter length */
- frame[n++] = len_param;
-
- /* Insert parameter */
- memcpy( frame+n, param, len_param);
- n += len_param;
-
- /* Insert value length ( 2 byte little endian format, LSB first) */
- frame[n++] = len_value & 0xff;
- frame[n++] = len_value >> 8;
+ return __irlan_insert_param(skb, param, IRLAN_BYTE, value, 0, NULL, 0);
+}
- frame[n++] = value;
-
- return len_param+len_value+3;
+int irlan_insert_short_param(struct sk_buff *skb, char *param, __u16 value)
+{
+ return __irlan_insert_param(skb, param, IRLAN_SHORT, 0, value, NULL, 0);
}
/*
@@ -539,96 +907,25 @@ int insert_byte_param( struct sk_buff *skb, char *param, __u8 value)
* Insert string parameter into frame
*
*/
-int insert_string_param( struct sk_buff *skb, char *param, char *value)
+int irlan_insert_string_param(struct sk_buff *skb, char *param, char *string)
{
- __u8 *frame;
- __u8 len_param;
- __u16 len_value;
- int n=0;
-
- if ( skb == NULL) {
- DEBUG( 0, "insert_param: Got NULL skb\n");
- return 0;
- }
- len_param = strlen( param);
- len_value = strlen( value);
+ int string_len = strlen(string);
- /*
- * Insert at end of sk-buffer
- */
- frame = skb->tail;
-
- /* Make space for data */
- if ( skb_tailroom(skb) < (len_param+len_value+3)) {
- DEBUG( 0, "insert_param: No more space at end of skb\n");
- return 0;
- }
- skb_put( skb, len_param+len_value+3);
-
- /* Insert parameter length */
- frame[n++] = len_param;
-
- /* Insert parameter */
- memcpy( frame+n, param, len_param);
- n += len_param;
-
- /* Insert value length ( 2 byte little endian format, LSB first) */
- frame[n++] = len_value & 0xff;
- frame[n++] = len_value >> 8;
-
- memcpy( frame+n, value, len_value);
- n+=len_value;
-
- return len_param+len_value+3;
+ return __irlan_insert_param(skb, param, IRLAN_ARRAY, 0, 0, string,
+ string_len);
}
/*
- * Function insert_array_param( skb, param, value, len_value)
+ * Function irlan_insert_array_param (skb, name, array, array_len)
*
* Insert array parameter into frame
*
*/
-int insert_array_param( struct sk_buff *skb, char *name, __u8 *value,
- __u16 value_len)
+int irlan_insert_array_param(struct sk_buff *skb, char *name, __u8 *array,
+ __u16 array_len)
{
- __u8 *frame;
- __u8 name_len;
- int n=0;
-
- if ( skb == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Got NULL skb\n");
- return 0;
- }
- name_len = strlen( name);
-
- /*
- * Insert at end of sk-buffer
- */
- frame = skb->tail;
-
- /* Make space for data */
- if ( skb_tailroom(skb) < (name_len+value_len+3)) {
- DEBUG( 0, __FUNCTION__ "(), No more space at end of skb\n");
- return 0;
- }
- skb_put( skb, name_len+value_len+3);
-
- /* Insert parameter length */
- frame[n++] = name_len;
-
- /* Insert parameter */
- memcpy( frame+n, name, name_len);
- n += name_len;
-
- /* Insert value length ( 2 byte little endian format, LSB first) */
- /* FIXME: should we use htons() here? */
- frame[n++] = value_len & 0xff;
- frame[n++] = value_len >> 8;
-
- memcpy( frame+n, value, value_len);
- n+=value_len;
-
- return name_len+value_len+3;
+ return __irlan_insert_param(skb, name, IRLAN_ARRAY, 0, 0, array,
+ array_len);
}
/*
@@ -640,130 +937,125 @@ int insert_array_param( struct sk_buff *skb, char *name, __u8 *value,
* | Name Length[1] | Param Name[1..255] | Val Length[2] | Value[0..1016]|
* -----------------------------------------------------------------------
*/
-int insert_param( struct sk_buff *skb, char *param, int type, char *value_char,
- __u8 value_byte, __u16 value_short)
+static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
+ __u8 value_byte, __u16 value_short,
+ __u8 *value_array, __u16 value_len)
{
__u8 *frame;
- __u8 len_param;
- __u16 len_value;
- int n;
+ __u8 param_len;
+ __u16 tmp_le; /* Temporary value in little endian format */
+ int n=0;
- if ( skb == NULL) {
- DEBUG( 0, "insert_param: Got NULL skb\n");
+ if (skb == NULL) {
+ DEBUG(2, __FUNCTION__ "(), Got NULL skb\n");
return 0;
- }
-
- n = 0;
+ }
- len_param = strlen( param);
- switch ( type) {
- case 1:
- ASSERT( value_char != NULL, return 0;);
- len_value = strlen( value_char);
+ param_len = strlen(param);
+ switch (type) {
+ case IRLAN_BYTE:
+ value_len = 1;
break;
- case 2:
- len_value = 1;
+ case IRLAN_SHORT:
+ value_len = 2;
break;
- case 3:
- len_value = 2;
+ case IRLAN_ARRAY:
+ ASSERT(value_array != NULL, return 0;);
+ ASSERT(value_len > 0, return 0;);
break;
default:
- DEBUG( 0, "Error in insert_param!\n");
+ DEBUG(2, __FUNCTION__ "(), Unknown parameter type!\n");
return 0;
break;
}
- /*
- * Insert at end of sk-buffer
- */
+ /* Insert at end of sk-buffer */
frame = skb->tail;
/* Make space for data */
- if ( skb_tailroom(skb) < (len_param+len_value+3)) {
- DEBUG( 0, "insert_param: No more space at end of skb\n");
+ if (skb_tailroom(skb) < (param_len+value_len+3)) {
+ DEBUG(2, __FUNCTION__ "(), No more space at end of skb\n");
return 0;
}
- skb_put( skb, len_param+len_value+3);
-
+ skb_put(skb, param_len+value_len+3);
+
/* Insert parameter length */
- frame[n++] = len_param;
+ frame[n++] = param_len;
/* Insert parameter */
- memcpy( frame+n, param, len_param); n += len_param;
+ memcpy(frame+n, param, param_len); n += param_len;
- /* Insert value length ( 2 byte little endian format, LSB first) */
- frame[n++] = len_value & 0xff;
- frame[n++] = len_value >> 8;
+ /* Insert value length (2 byte little endian format, LSB first) */
+ tmp_le = cpu_to_le16(value_len);
+ memcpy(frame+n, &tmp_le, 2); n += 2; /* To avoid alignment problems */
/* Insert value */
switch (type) {
- case 1:
- memcpy( frame+n, value_char, len_value); n+=len_value;
- break;
- case 2:
+ case IRLAN_BYTE:
frame[n++] = value_byte;
break;
- case 3:
- frame[n++] = value_short & 0xff;
- frame[n++] = (value_short >> 8) & 0xff;
+ case IRLAN_SHORT:
+ tmp_le = cpu_to_le16(value_short);
+ memcpy(frame+n, &tmp_le, 2); n += 2;
+ break;
+ case IRLAN_ARRAY:
+ memcpy(frame+n, value_array, value_len); n+=value_len;
break;
default:
break;
}
- ASSERT( n == (len_param+len_value+3), return 0;);
+ ASSERT(n == (param_len+value_len+3), return 0;);
- return len_param+len_value+3;
+ return param_len+value_len+3;
}
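
The parameter layout documented above — a one-byte name length, the ASCII name, a two-byte little-endian value length, then the value itself — is straightforward to emit into a flat buffer. A standalone sketch that appends a single byte-valued parameter, without any sk_buff handling (put_param_byte is an illustrative name, not part of the IrLAN code):

    #include <stdint.h>
    #include <string.h>

    /*
     * Append one IrLAN parameter to buf:
     *   | name len (1) | name | value len (2, LE) | value |
     * Returns the number of bytes written, or 0 if it does not fit.
     */
    static size_t put_param_byte(uint8_t *buf, size_t room,
                                 const char *name, uint8_t value)
    {
            size_t name_len = strlen(name);
            size_t total = name_len + 1 /* value */ + 3;
            size_t n = 0;

            if (name_len > 255 || room < total)
                    return 0;

            buf[n++] = (uint8_t) name_len;
            memcpy(buf + n, name, name_len);
            n += name_len;
            buf[n++] = 0x01;              /* value length, LSB first */
            buf[n++] = 0x00;
            buf[n++] = value;

            return n;                      /* == total */
    }

With name "DATA_CHAN" and value 0x6e this writes 13 bytes — 09 'D' 'A' 'T' 'A' '_' 'C' 'H' 'A' 'N' 01 00 6e — matching the length __irlan_insert_param() returns for the same parameter.
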
/*
- * Function irlan_get_response_param (buf, param, value)
+ * Function irlan_extract_param (buf, name, value, len)
*
* Extracts a single parameter name/value pair from buffer and updates
- * the buffer pointer to point to the next name/value pair.
- *
+ * the buffer pointer to point to the next name/value pair.
*/
-int irlan_get_response_param( __u8 *buf, char *name, char *value, int *len)
+int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
{
__u8 name_len;
__u16 val_len;
int n=0;
-
- DEBUG( 4, "irlan_get_response_param()\n");
- /* get length of parameter name ( 1 byte) */
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ /* get length of parameter name (1 byte) */
name_len = buf[n++];
if (name_len > 254) {
- DEBUG( 0, __FUNCTION__ "(), name_len > 254\n");
- return -1;
+ DEBUG(2, __FUNCTION__ "(), name_len > 254\n");
+ return -RSP_INVALID_COMMAND_FORMAT;
}
/* get parameter name */
- memcpy( name, buf+n, name_len);
+ memcpy(name, buf+n, name_len);
name[ name_len] = '\0';
n+=name_len;
/*
- * Get length of parameter value ( 2 bytes in little endian
+ * Get length of parameter value (2 bytes in little endian
* format)
*/
- val_len = buf[n++] & 0xff;
- val_len |= buf[n++] << 8;
+ memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
+ le16_to_cpus(&val_len); n+=2;
if (val_len > 1016) {
- DEBUG( 0, __FUNCTION__ "(), parameter length to long\n");
- return -1;
+ DEBUG(2, __FUNCTION__ "(), parameter length to long\n");
+ return -RSP_INVALID_COMMAND_FORMAT;
}
-
*len = val_len;
/* get parameter value */
- memcpy( value, buf+n, val_len);
+ memcpy(value, buf+n, val_len);
value[ val_len] = '\0';
n+=val_len;
- DEBUG( 4, "Parameter: %s ", name);
- DEBUG( 4, "Value: %s\n", value);
+ DEBUG(4, "Parameter: %s ", name);
+ DEBUG(4, "Value: %s\n", value);
return n;
}
@@ -774,37 +1066,55 @@ int irlan_get_response_param( __u8 *buf, char *name, char *value, int *len)
*
* Give some info to the /proc file system
*/
-static int irlan_proc_read( char *buf, char **start, off_t offset,
- int len, int unused)
+static int irlan_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused)
{
struct irlan_cb *self;
-
- ASSERT( irlan != NULL, return 0;);
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ ASSERT(irlan != NULL, return 0;);
len = 0;
- len += sprintf( buf+len, "IrLAN\n");
-
- self = ( struct irlan_cb *) hashbin_get_first( irlan);
- while ( self != NULL) {
- ASSERT( self->magic == IRLAN_MAGIC, return len;);
-
- len += sprintf( buf+len, "ifname: %s, ",
- self->ifname);
- /* len += sprintf( buf+len, "state: %s, ", */
-/* irlan_client_state[ self->state]); */
- len += sprintf( buf+len, "saddr: %#08x\n",
- self->saddr);
- len += sprintf( buf+len, "daddr: %#08x\n",
- self->daddr);
- len += sprintf( buf+len, "tbusy: %s\n", self->dev.tbusy ?
- "TRUE" : "FALSE");
+ len += sprintf(buf+len, "IrLAN instances:\n");
+
+ self = (struct irlan_cb *) hashbin_get_first(irlan);
+ while (self != NULL) {
+ ASSERT(self->magic == IRLAN_MAGIC, return len;);
- len += sprintf( buf+len, "\n");
-
- self = ( struct irlan_cb *) hashbin_get_next( irlan);
- DEBUG( 4, "self=%p\n", self);
+ len += sprintf(buf+len, "ifname: %s,\n",
+ self->ifname);
+ len += sprintf(buf+len, "client state: %s, ",
+ irlan_state[ self->client.state]);
+ len += sprintf(buf+len, "provider state: %s,\n",
+ irlan_state[ self->provider.state]);
+ len += sprintf(buf+len, "saddr: %#08x, ",
+ self->saddr);
+ len += sprintf(buf+len, "daddr: %#08x\n",
+ self->daddr);
+ len += sprintf(buf+len, "version: %d.%d,\n",
+ self->version[1], self->version[0]);
+ len += sprintf(buf+len, "access type: %s\n",
+ irlan_access[ self->access_type]);
+ len += sprintf(buf+len, "media: %s\n",
+ irlan_media[ self->media]);
+
+ len += sprintf(buf+len, "local filter:\n");
+ len += sprintf(buf+len, "remote filter: ");
+ len += irlan_print_filter(self->client.filter_type, buf+len);
+
+ len += sprintf(buf+len, "tx busy: %s\n", self->dev.tbusy ?
+ "TRUE" : "FALSE");
+
+ len += sprintf(buf+len, "\n");
+
+ self = (struct irlan_cb *) hashbin_get_next(irlan);
}
+ restore_flags(flags);
+
return len;
}
#endif
@@ -815,50 +1125,68 @@ static int irlan_proc_read( char *buf, char **start, off_t offset,
* Print return code of request to peer IrLAN layer.
*
*/
-void print_ret_code( __u8 code)
+void print_ret_code(__u8 code)
{
- switch( code) {
+ switch(code) {
case 0:
- printk( KERN_INFO "Success\n");
+ printk(KERN_INFO "Success\n");
break;
case 1:
- printk( KERN_WARNING "Insufficient resources\n");
+ printk(KERN_WARNING "Insufficient resources\n");
break;
case 2:
- printk( KERN_WARNING "Invalid command format\n");
+ printk(KERN_WARNING "Invalid command format\n");
break;
case 3:
- printk( KERN_WARNING "Command not supported\n");
+ printk(KERN_WARNING "Command not supported\n");
break;
case 4:
- printk( KERN_WARNING "Parameter not supported\n");
+ printk(KERN_WARNING "Parameter not supported\n");
break;
case 5:
- printk( KERN_WARNING "Value not supported\n");
+ printk(KERN_WARNING "Value not supported\n");
break;
case 6:
- printk( KERN_WARNING "Not open\n");
+ printk(KERN_WARNING "Not open\n");
break;
case 7:
- printk( KERN_WARNING "Authentication required\n");
+ printk(KERN_WARNING "Authentication required\n");
break;
case 8:
- printk( KERN_WARNING "Invalid password\n");
+ printk(KERN_WARNING "Invalid password\n");
break;
case 9:
- printk( KERN_WARNING "Protocol error\n");
+ printk(KERN_WARNING "Protocol error\n");
break;
case 255:
- printk( KERN_WARNING "Asynchronous status\n");
+ printk(KERN_WARNING "Asynchronous status\n");
break;
}
}
+void irlan_mod_inc_use_count(void)
+{
+#ifdef MODULE
+ MOD_INC_USE_COUNT;
+#endif
+}
+
+void irlan_mod_dec_use_count(void)
+{
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+}
+
#ifdef MODULE
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("The Linux IrDA LAN protocol");
+MODULE_PARM(eth, "i");
+MODULE_PARM(access, "i");
+MODULE_PARM(timeout, "i");
+
/*
* Function init_module (void)
*
@@ -867,11 +1195,7 @@ MODULE_DESCRIPTION("The Linux IrDA LAN protocol");
*/
int init_module(void)
{
- DEBUG( 4, __FUNCTION__ "(), irlan.c\n");
-
- irlan_init();
-
- return 0;
+ return irlan_init();
}
/*
@@ -882,15 +1206,9 @@ int init_module(void)
*/
void cleanup_module(void)
{
- DEBUG( 4, "--> irlan, cleanup_module\n");
- /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
-
/* Free some memory */
irlan_cleanup();
-
- DEBUG( 4, "irlan, cleanup_module -->\n");
}
#endif /* MODULE */
-
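
For reference, the parameter helpers patched above and the new irlan_extract_param() agree on a simple length-prefixed layout: one byte of parameter name length, the name itself, a two-byte little-endian value length, then the value. The following is a minimal, standalone sketch of an encoder for that layout into a caller-supplied flat buffer; encode_irlan_param() is a hypothetical name and this code is illustrative only, not part of the patch.

    #include <string.h>

    /*
     * Illustrative only (not part of the patch).  Encodes one IrLAN
     * parameter in the layout used above:
     *
     *   1 byte   parameter name length
     *   n bytes  parameter name
     *   2 bytes  value length, little endian (LSB first)
     *   m bytes  value
     *
     * Returns the number of bytes written, or 0 if the parameter does
     * not fit or exceeds the limits checked by the real code.
     */
    static int encode_irlan_param(unsigned char *buf, int buflen,
                                  const char *name, const void *value,
                                  unsigned int value_len)
    {
            unsigned int name_len = strlen(name);
            unsigned int total = 1 + name_len + 2 + value_len;
            unsigned int n = 0;

            if (total > (unsigned int) buflen || name_len > 254 ||
                value_len > 1016)
                    return 0;

            buf[n++] = name_len;                    /* name length */
            memcpy(buf + n, name, name_len);        /* name */
            n += name_len;
            buf[n++] = value_len & 0xff;            /* value length, LSB first */
            buf[n++] = (value_len >> 8) & 0xff;
            memcpy(buf + n, value, value_len);      /* value */
            n += value_len;

            return n;
    }
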
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index c84aa0aa9..c1965a117 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Oct 15 08:37:58 1998
- * Modified at: Wed Dec 9 11:14:53 1998
+ * Modified at: Thu Apr 22 14:26:39 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
@@ -27,66 +27,107 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_client.h>
+#include <net/irda/irlan_event.h>
#include <net/irda/irlan_eth.h>
/*
* Function irlan_eth_init (dev)
*
- * The network device initialization function. Called only once.
+ * The network device initialization function.
*
*/
-int irlan_eth_init( struct device *dev)
+int irlan_eth_init(struct device *dev)
{
+ struct irmanager_event mgr_event;
struct irlan_cb *self;
- DEBUG( 4, __FUNCTION__"()\n");
+ DEBUG(0, __FUNCTION__"()\n");
- ASSERT( dev != NULL, return -1;);
+ ASSERT(dev != NULL, return -1;);
self = (struct irlan_cb *) dev->priv;
-/* dev->open = irlan_eth_open; */
-/* dev->stop = irlan_eth_close; */
-
- dev->hard_start_xmit = irlan_eth_tx;
+ dev->open = irlan_eth_open;
+ dev->stop = irlan_eth_close;
+ dev->hard_start_xmit = irlan_eth_xmit;
dev->get_stats = irlan_eth_get_stats;
dev->set_multicast_list = irlan_eth_set_multicast_list;
dev->tbusy = 1;
- ether_setup( dev);
+ ether_setup(dev);
dev->tx_queue_len = TTP_MAX_QUEUE;
+#if 0
+ /*
+	 * OK, since we are emulating an IrLAN server we will have to give
+	 * ourselves an ethernet address!
+	 * FIXME: this should be done more dynamically
+ */
+ dev->dev_addr[0] = 0x40;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x00;
+ dev->dev_addr[3] = 0x00;
+ dev->dev_addr[4] = 0x23;
+ dev->dev_addr[5] = 0x45;
+#endif
+ /*
+ * Network device has now been registered, so tell irmanager about
+ * it, so it can be configured with network parameters
+ */
+ mgr_event.event = EVENT_IRLAN_START;
+ sprintf(mgr_event.devname, "%s", self->ifname);
+ irmanager_notify(&mgr_event);
+
+ /*
+ * We set this so that we only notify once, since if
+ * configuration of the network device fails, the user
+ * will have to sort it out first anyway. No need to
+ * try again.
+ */
+ self->notify_irmanager = FALSE;
+
return 0;
}
/*
* Function irlan_eth_open (dev)
*
- * Start the IrLAN ether network device, this function will be called by
- * "ifconfig irlan0 up".
+ * Network device has been opened by user
*
*/
-int irlan_eth_open( struct device *dev)
+int irlan_eth_open(struct device *dev)
{
- /* struct irlan_cb *self = (struct irlan_cb *) dev->priv; */
+ struct irlan_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(0, __FUNCTION__ "()\n");
+
+ ASSERT(dev != NULL, return -1;);
- ASSERT( dev != NULL, return -1;);
+ self = (struct irlan_cb *) dev->priv;
+
+ ASSERT(self != NULL, return -1;);
/* Ready to play! */
- dev->tbusy = 0;
+/* dev->tbusy = 0; */ /* Wait until data link is ready */
dev->interrupt = 0;
dev->start = 1;
- /* MOD_INC_USE_COUNT; */
+ self->notify_irmanager = TRUE;
+
+ /* We are now open, so time to do some work */
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+
+ irlan_mod_inc_use_count();
return 0;
}
@@ -94,21 +135,38 @@ int irlan_eth_open( struct device *dev)
/*
* Function irlan_eth_close (dev)
*
- * Stop the Client ether network device, his function will be called by
- * ifconfig down.
+ *    Stop the ether network device, this function will usually be called by
+ *    ifconfig down. We should now disconnect the link. We start the
+ * close timer, so that the instance will be removed if we are unable
+ * to discover the remote device after the disconnect.
*/
int irlan_eth_close(struct device *dev)
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( dev != NULL, return -1;);
+ struct irlan_cb *self = (struct irlan_cb *) dev->priv;
+
+ DEBUG(0, __FUNCTION__ "()\n");
/* Stop device */
dev->tbusy = 1;
dev->start = 0;
- /* MOD_DEC_USE_COUNT; */
+ irlan_mod_dec_use_count();
+
+ irlan_close_data_channel(self);
+
+ irlan_close_tsaps(self);
+
+ irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
+ irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
+ irlan_start_watchdog_timer(self, IRLAN_TIMEOUT);
+
+ /* Device closed by user! */
+ if (self->notify_irmanager)
+ self->notify_irmanager = FALSE;
+ else
+ self->notify_irmanager = TRUE;
+
return 0;
}
@@ -118,75 +176,45 @@ int irlan_eth_close(struct device *dev)
* Transmits ethernet frames over IrDA link.
*
*/
-int irlan_eth_tx( struct sk_buff *skb, struct device *dev)
+int irlan_eth_xmit(struct sk_buff *skb, struct device *dev)
{
struct irlan_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
self = (struct irlan_cb *) dev->priv;
- ASSERT( self != NULL, return 0;);
- ASSERT( self->magic == IRLAN_MAGIC, return 0;);
+ ASSERT(self != NULL, return 0;);
+ ASSERT(self->magic == IRLAN_MAGIC, return 0;);
- if ( dev->tbusy) {
+ /* Lock transmit buffer */
+ if (irda_lock((void *) &dev->tbusy) == FALSE) {
/*
* If we get here, some higher level has decided we are broken.
* There should really be a "kick me" function call instead.
*/
int tickssofar = jiffies - dev->trans_start;
- DEBUG( 4, __FUNCTION__ "(), tbusy==TRUE\n");
- if ( tickssofar < 5)
+ if (tickssofar < 5)
return -EBUSY;
dev->tbusy = 0;
dev->trans_start = jiffies;
}
- /*
- * If some higher layer thinks we've missed an tx-done interrupt
- * we are passed NULL. Caution: dev_tint() handles the cli()/sti()
- * itself.
- */
- if ( skb == NULL) {
- DEBUG( 0, __FUNCTION__ "(), skb==NULL\n");
-
- return 0;
- }
- /*
- * Check that we are connected
- */
- if ( !self->connected) {
- DEBUG( 4, __FUNCTION__ "(), Not connected, dropping frame!\n");
-
- dev_kfree_skb( skb);
- ++self->stats.tx_dropped;
-
- return 0;
- }
- /*
- * Block a timer-based transmit from overlapping. This could better be
- * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
- */
- if ( test_and_set_bit(0, (void*) &dev->tbusy) != 0) {
- printk( KERN_WARNING "%s: Transmitter access conflict.\n",
- dev->name);
- return 0;
- }
- DEBUG( 4, "Room left at head: %d\n", skb_headroom(skb));
- DEBUG( 4, "Room left at tail: %d\n", skb_tailroom(skb));
- DEBUG( 4, "Required room: %d\n", IRLAN_MAX_HEADER);
+ DEBUG(4, "Room left at head: %d\n", skb_headroom(skb));
+ DEBUG(4, "Room left at tail: %d\n", skb_tailroom(skb));
+ DEBUG(4, "Required room: %d\n", IRLAN_MAX_HEADER);
- /* Skb headroom large enough to contain IR-headers? */
- if (( skb_headroom( skb) < IRLAN_MAX_HEADER) || ( skb_shared( skb))) {
+ /* skb headroom large enough to contain IR-headers? */
+ if ((skb_headroom(skb) < IRLAN_MAX_HEADER) || (skb_shared(skb))) {
struct sk_buff *new_skb =
skb_realloc_headroom(skb, IRLAN_MAX_HEADER);
- ASSERT( new_skb != NULL, return 0;);
- ASSERT( skb_headroom( new_skb) >= IRLAN_MAX_HEADER, return 0;);
+ ASSERT(new_skb != NULL, return 0;);
+ ASSERT(skb_headroom(new_skb) >= IRLAN_MAX_HEADER, return 0;);
/* Free original skb, and use the new one */
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
skb = new_skb;
}
@@ -198,14 +226,14 @@ int irlan_eth_tx( struct sk_buff *skb, struct device *dev)
* Now queue the packet in the transport layer
* FIXME: clean up the code below! DB
*/
- if ( self->use_udata) {
- irttp_udata_request( self->tsap_data, skb);
+ if (self->use_udata) {
+ irttp_udata_request(self->tsap_data, skb);
dev->tbusy = 0;
return 0;
}
- if ( irttp_data_request( self->tsap_data, skb) == -1) {
+ if (irttp_data_request(self->tsap_data, skb) == -1) {
/*
* IrTTPs tx queue is full, so we just have to drop the
* frame! You might think that we should just return -1
@@ -215,53 +243,51 @@ int irlan_eth_tx( struct sk_buff *skb, struct device *dev)
* really confuse do_dev_queue_xmit() in dev.c! I have
* tried :-) DB
*/
- DEBUG( 4, __FUNCTION__ "(), Dropping frame\n");
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
++self->stats.tx_dropped;
return 0;
}
- dev->tbusy = 0;
+ dev->tbusy = 0; /* Finished! */
return 0;
}
/*
- * Function irlan_eth_rx (handle, skb)
+ * Function irlan_eth_receive (handle, skb)
*
* This function gets the data that is received on the data channel
*
*/
-void irlan_eth_rx( void *instance, void *sap, struct sk_buff *skb)
+int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
{
struct irlan_cb *self;
- self = ( struct irlan_cb *) instance;
+ self = (struct irlan_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return 0;);
+ ASSERT(self->magic == IRLAN_MAGIC, return 0;);
if (skb == NULL) {
++self->stats.rx_dropped;
- return;
+ return 0;
}
- IS_SKB( skb, return;);
- ASSERT( skb->len > 1, return;);
+ ASSERT(skb->len > 1, return 0;);
- DEBUG( 4, "Got some ether data: length=%d\n", (int)skb->len);
-
/*
* Adopt this frame! Important to set all these fields since they
* might have been previously set by the low level IrDA network
* device driver
*/
skb->dev = &self->dev;
- skb->protocol=eth_type_trans( skb, skb->dev); /* Remove eth header */
+ skb->protocol=eth_type_trans(skb, skb->dev); /* Remove eth header */
- netif_rx( skb); /* Eat it! */
+ netif_rx(skb); /* Eat it! */
self->stats.rx_packets++;
self->stats.rx_bytes += skb->len;
+
+ return 0;
}
/*
@@ -270,25 +296,25 @@ void irlan_eth_rx( void *instance, void *sap, struct sk_buff *skb)
* Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by
* controlling the dev->tbusy variable.
*/
-void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow)
+void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
struct irlan_cb *self;
struct device *dev;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
self = (struct irlan_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
dev = &self->dev;
- ASSERT( dev != NULL, return;);
+ ASSERT(dev != NULL, return;);
- switch ( flow) {
+ switch (flow) {
case FLOW_STOP:
- DEBUG( 4, "IrLAN, stopping Ethernet layer\n");
+ DEBUG(4, "IrLAN, stopping Ethernet layer\n");
dev->tbusy = 1;
break;
@@ -296,7 +322,7 @@ void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow)
/*
		 * Tell upper layers that it's time to transmit frames again
*/
- DEBUG( 4, "IrLAN, starting Ethernet layer\n");
+ DEBUG(4, "IrLAN, starting Ethernet layer\n");
dev->tbusy = 0;
@@ -304,10 +330,10 @@ void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow)
* Ready to receive more frames, so schedule the network
* layer
*/
- mark_bh( NET_BH);
+ mark_bh(NET_BH);
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown flow command!\n");
+ DEBUG(0, __FUNCTION__ "(), Unknown flow command!\n");
}
}
@@ -317,65 +343,89 @@ void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow)
* If we don't want to use ARP. Currently not used!!
*
*/
-void irlan_eth_rebuild_header( void *buff, struct device *dev,
- unsigned long dest, struct sk_buff *skb)
+void irlan_eth_rebuild_header(void *buff, struct device *dev,
+ unsigned long dest, struct sk_buff *skb)
{
- struct ethhdr *eth = ( struct ethhdr *) buff;
+ struct ethhdr *eth = (struct ethhdr *) buff;
- memcpy( eth->h_source, dev->dev_addr, dev->addr_len);
- memcpy( eth->h_dest, dev->dev_addr, dev->addr_len);
+ memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
+ memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
/* return 0; */
}
/*
+ * Function irlan_etc_send_gratuitous_arp (dev)
+ *
+ * Send gratuitous ARP to announce that we have changed
+ *    hardware address, so that all peers update their ARP tables
+ */
+void irlan_etc_send_gratuitous_arp(struct device *dev)
+{
+ struct in_device *in_dev;
+
+ /*
+ * When we get a new MAC address do a gratuitous ARP. This
+ * is useful if we have changed access points on the same
+ * subnet.
+ */
+ DEBUG(4, "IrLAN: Sending gratuitous ARP\n");
+ in_dev = dev->ip_ptr;
+ arp_send(ARPOP_REQUEST, ETH_P_ARP,
+ in_dev->ifa_list->ifa_address,
+ &dev,
+ in_dev->ifa_list->ifa_address,
+ NULL, dev->dev_addr, NULL);
+}
+
+/*
* Function set_multicast_list (dev)
*
* Configure the filtering of the device
*
*/
#define HW_MAX_ADDRS 4 /* Must query to get it! */
-void irlan_eth_set_multicast_list( struct device *dev)
+void irlan_eth_set_multicast_list(struct device *dev)
{
struct irlan_cb *self;
self = dev->priv;
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
+ DEBUG(0, __FUNCTION__ "()\n");
+ return;
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
if (dev->flags&IFF_PROMISC) {
/* Enable promiscuous mode */
- DEBUG( 0, "Promiscous mode not implemented\n");
+		DEBUG(0, "Promiscuous mode not implemented\n");
/* outw(MULTICAST|PROMISC, ioaddr); */
}
- else if ((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
+ else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
- DEBUG( 4, __FUNCTION__ "(), Setting multicast filter\n");
+ DEBUG(4, __FUNCTION__ "(), Setting multicast filter\n");
/* hardware_set_filter(NULL); */
- irlan_set_multicast_filter( self, TRUE);
+ irlan_set_multicast_filter(self, TRUE);
}
else if (dev->mc_count) {
- DEBUG( 4, __FUNCTION__ "(), Setting multicast filter\n");
+ DEBUG(4, __FUNCTION__ "(), Setting multicast filter\n");
/* Walk the address list, and load the filter */
/* hardware_set_filter(dev->mc_list); */
- irlan_set_multicast_filter( self, TRUE);
+ irlan_set_multicast_filter(self, TRUE);
}
else {
- DEBUG( 4, __FUNCTION__ "(), Clearing multicast filter\n");
- irlan_set_multicast_filter( self, FALSE);
+ DEBUG(4, __FUNCTION__ "(), Clearing multicast filter\n");
+ irlan_set_multicast_filter(self, FALSE);
}
- if ( dev->flags & IFF_BROADCAST) {
- DEBUG( 4, __FUNCTION__ "(), Setting broadcast filter\n");
- irlan_set_broadcast_filter( self, TRUE);
+ if (dev->flags & IFF_BROADCAST) {
+ DEBUG(4, __FUNCTION__ "(), Setting broadcast filter\n");
+ irlan_set_broadcast_filter(self, TRUE);
} else {
- DEBUG( 4, __FUNCTION__ "(), Clearing broadcast filter\n");
- irlan_set_broadcast_filter( self, FALSE);
+ DEBUG(4, __FUNCTION__ "(), Clearing broadcast filter\n");
+ irlan_set_broadcast_filter(self, FALSE);
}
}
@@ -385,12 +435,12 @@ void irlan_eth_set_multicast_list( struct device *dev)
* Get the current statistics for this device
*
*/
-struct enet_statistics *irlan_eth_get_stats( struct device *dev)
+struct enet_statistics *irlan_eth_get_stats(struct device *dev)
{
struct irlan_cb *self = (struct irlan_cb *) dev->priv;
- ASSERT( self != NULL, return NULL;);
- ASSERT( self->magic == IRLAN_MAGIC, return NULL;);
+ ASSERT(self != NULL, return NULL;);
+ ASSERT(self->magic == IRLAN_MAGIC, return NULL;);
return &self->stats;
}
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index 96fbccb58..d54e7cc12 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Oct 20 09:10:16 1998
- * Modified at: Sat Dec 5 14:52:22 1998
+ * Modified at: Wed Feb 3 21:42:27 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli, All Rights Reserved.
@@ -38,11 +38,23 @@ char *irlan_state[] = {
"IRLAN_SYNC",
};
-void irlan_next_state( struct irlan_cb *self,
- IRLAN_STATE state)
+void irlan_next_client_state( struct irlan_cb *self, IRLAN_STATE state)
{
+ DEBUG(2, __FUNCTION__"(), %s\n", irlan_state[state]);
+
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLAN_MAGIC, return;);
- self->state = state;
+ self->client.state = state;
}
+
+void irlan_next_provider_state( struct irlan_cb *self, IRLAN_STATE state)
+{
+ DEBUG(2, __FUNCTION__"(), %s\n", irlan_state[state]);
+
+ ASSERT( self != NULL, return;);
+ ASSERT( self->magic == IRLAN_MAGIC, return;);
+
+ self->provider.state = state;
+}
+
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c
new file mode 100644
index 000000000..c4c1079dd
--- /dev/null
+++ b/net/irda/irlan/irlan_filter.c
@@ -0,0 +1,235 @@
+/*********************************************************************
+ *
+ * Filename: irlan_filter.c
+ * Version:
+ * Description:
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Fri Jan 29 11:16:38 1999
+ * Modified at: Thu Feb 25 15:10:54 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/skbuff.h>
+
+#include <net/irda/irlan_common.h>
+
+/*
+ * Function handle_filter_request (self, skb)
+ *
+ * Handle filter request from client peer device
+ *
+ */
+void handle_filter_request(struct irlan_cb *self, struct sk_buff *skb)
+{
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ if ((self->provider.filter_type == IRLAN_DIRECTED) &&
+ (self->provider.filter_operation == DYNAMIC))
+ {
+ DEBUG(0, "Giving peer a dynamic Ethernet address\n");
+
+ self->provider.mac_address[0] = 0x40;
+ self->provider.mac_address[1] = 0x00;
+ self->provider.mac_address[2] = 0x00;
+ self->provider.mac_address[3] = 0x00;
+
+ /* Use arbitration value to generate MAC address */
+ if (self->access_type == ACCESS_PEER) {
+ self->provider.mac_address[4] =
+ self->provider.send_arb_val & 0xff;
+ self->provider.mac_address[5] =
+			(self->provider.send_arb_val >> 8) & 0xff;
+ } else {
+ /* Just generate something for now */
+ self->provider.mac_address[4] = jiffies & 0xff;
+ self->provider.mac_address[5] = (jiffies >> 8) & 0xff;
+ }
+
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x03;
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
+ irlan_insert_short_param(skb, "MAX_ENTRY", 0x0001);
+ irlan_insert_array_param(skb, "FILTER_ENTRY", self->provider.mac_address, 6);
+ return;
+ }
+
+ if ((self->provider.filter_type == IRLAN_DIRECTED) &&
+ (self->provider.filter_mode == FILTER))
+ {
+ DEBUG(0, "Directed filter on\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_DIRECTED) &&
+ (self->provider.filter_mode == NONE))
+ {
+ DEBUG(0, "Directed filter off\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+
+ if ((self->provider.filter_type == IRLAN_BROADCAST) &&
+ (self->provider.filter_mode == FILTER))
+ {
+ DEBUG(0, "Broadcast filter on\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_BROADCAST) &&
+ (self->provider.filter_mode == NONE))
+ {
+ DEBUG(0, "Broadcast filter off\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_MULTICAST) &&
+ (self->provider.filter_mode == FILTER))
+ {
+ DEBUG(0, "Multicast filter on\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_MULTICAST) &&
+ (self->provider.filter_mode == NONE))
+ {
+ DEBUG(0, "Multicast filter off\n");
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x00;
+ return;
+ }
+ if ((self->provider.filter_type == IRLAN_MULTICAST) &&
+ (self->provider.filter_operation == GET))
+ {
+ DEBUG(0, "Multicast filter get\n");
+ skb->data[0] = 0x00; /* Success? */
+ skb->data[1] = 0x02;
+ irlan_insert_string_param(skb, "FILTER_MODE", "NONE");
+ irlan_insert_short_param(skb, "MAX_ENTRY", 16);
+ return;
+ }
+ skb->data[0] = 0x00; /* Command not supported */
+ skb->data[1] = 0x00;
+
+ DEBUG(0, "Not implemented!\n");
+}
+
+/*
+ * Function irlan_check_command_param (self, param, value)
+ *
+ * Check parameters in request from peer device
+ *
+ */
+void irlan_check_command_param(struct irlan_cb *self, char *param,
+ char *value)
+{
+ __u8 *bytes;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ bytes = value;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ DEBUG(4, "%s, %s\n", param, value);
+
+ /*
+ * This is experimental!! DB.
+ */
+ if (strcmp(param, "MODE") == 0) {
+ DEBUG(0, __FUNCTION__ "()\n");
+ self->use_udata = TRUE;
+ return;
+ }
+
+ /*
+ * FILTER_TYPE
+ */
+ if (strcmp(param, "FILTER_TYPE") == 0) {
+ if (strcmp(value, "DIRECTED") == 0) {
+ self->provider.filter_type = IRLAN_DIRECTED;
+ return;
+ }
+ if (strcmp(value, "MULTICAST") == 0) {
+ self->provider.filter_type = IRLAN_MULTICAST;
+ return;
+ }
+ if (strcmp(value, "BROADCAST") == 0) {
+ self->provider.filter_type = IRLAN_BROADCAST;
+ return;
+ }
+ }
+ /*
+ * FILTER_MODE
+ */
+ if (strcmp(param, "FILTER_MODE") == 0) {
+ if (strcmp(value, "ALL") == 0) {
+ self->provider.filter_mode = ALL;
+ return;
+ }
+ if (strcmp(value, "FILTER") == 0) {
+ self->provider.filter_mode = FILTER;
+ return;
+ }
+ if (strcmp(value, "NONE") == 0) {
+ self->provider.filter_mode = FILTER;
+ return;
+ }
+ }
+ /*
+ * FILTER_OPERATION
+ */
+ if (strcmp(param, "FILTER_OPERATION") == 0) {
+ if (strcmp(value, "DYNAMIC") == 0) {
+ self->provider.filter_operation = DYNAMIC;
+ return;
+ }
+ if (strcmp(value, "GET") == 0) {
+ self->provider.filter_operation = GET;
+ return;
+ }
+ }
+}
+
+int irlan_print_filter(int filter_type, char *buf)
+{
+ int len = 0;
+
+ if (filter_type & IRLAN_DIRECTED)
+ len += sprintf(buf+len, "%s", "DIRECTED ");
+ if (filter_type & IRLAN_FUNCTIONAL)
+ len += sprintf(buf+len, "%s", "FUNCTIONAL ");
+ if (filter_type & IRLAN_GROUP)
+ len += sprintf(buf+len, "%s", "GROUP ");
+ if (filter_type & IRLAN_MAC_FRAME)
+ len += sprintf(buf+len, "%s", "MAC_FRAME ");
+ if (filter_type & IRLAN_MULTICAST)
+ len += sprintf(buf+len, "%s", "MULTICAST ");
+ if (filter_type & IRLAN_BROADCAST)
+ len += sprintf(buf+len, "%s", "BROADCAST ");
+ if (filter_type & IRLAN_IPX_SOCKET)
+ len += sprintf(buf+len, "%s", "IPX_SOCKET");
+
+ len += sprintf(buf+len, "\n");
+
+ return len;
+}
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
new file mode 100644
index 000000000..8e2c3c25a
--- /dev/null
+++ b/net/irda/irlan/irlan_provider.c
@@ -0,0 +1,425 @@
+/*********************************************************************
+ *
+ * Filename: irlan_provider.c
+ * Version: 0.9
+ * Description: IrDA LAN Access Protocol Implementation
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Thu Apr 22 14:28:52 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
+ *
+ * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irttp.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irias_object.h>
+#include <net/irda/iriap.h>
+#include <net/irda/timer.h>
+
+#include <net/irda/irlan_common.h>
+#include <net/irda/irlan_eth.h>
+#include <net/irda/irlan_event.h>
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_filter.h>
+#include <net/irda/irlan_client.h>
+
+/*
+ * Function irlan_provider_data_indication (handle, skb)
+ *
+ * This function gets the data that is received on the control channel
+ *
+ */
+int irlan_provider_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
+{
+ struct irlan_cb *self;
+ __u8 code;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ self = (struct irlan_cb *) instance;
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ ASSERT(skb != NULL, return -1;);
+
+ code = skb->data[0];
+ switch(code) {
+ case CMD_GET_PROVIDER_INFO:
+ DEBUG(4, "Got GET_PROVIDER_INFO command!\n");
+ irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb);
+ break;
+
+ case CMD_GET_MEDIA_CHAR:
+ DEBUG(4, "Got GET_MEDIA_CHAR command!\n");
+ irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb);
+ break;
+ case CMD_OPEN_DATA_CHANNEL:
+ DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n");
+ irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb);
+ break;
+ case CMD_FILTER_OPERATION:
+ DEBUG(4, "Got FILTER_OPERATION command!\n");
+ irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb);
+ break;
+ case CMD_RECONNECT_DATA_CHAN:
+ DEBUG(2, __FUNCTION__"(), Got RECONNECT_DATA_CHAN command\n");
+ DEBUG(2, __FUNCTION__"(), NOT IMPLEMENTED\n");
+ break;
+ case CMD_CLOSE_DATA_CHAN:
+ DEBUG(2, "Got CLOSE_DATA_CHAN command!\n");
+ DEBUG(2, __FUNCTION__"(), NOT IMPLEMENTED\n");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown command!\n");
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Function irlan_provider_connect_indication (handle, skb, priv)
+ *
+ * Got connection from peer IrLAN layer
+ *
+ */
+void irlan_provider_connect_indication(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size, struct sk_buff *skb)
+{
+ struct irlan_cb *self, *entry, *new;
+ struct tsap_cb *tsap;
+
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ self = (struct irlan_cb *) instance;
+ tsap = (struct tsap_cb *) sap;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ ASSERT(tsap == self->provider.tsap_ctrl,return;);
+ ASSERT(self->provider.state == IRLAN_IDLE, return;);
+
+ /* Check if this provider is currently unused */
+ if (self->daddr == DEV_ADDR_ANY) {
+ /*
+ * Rehash instance, now we have a client (daddr) to serve.
+ */
+ entry = hashbin_remove(irlan, self->daddr, NULL);
+ ASSERT( entry == self, return;);
+
+ self->daddr = irttp_get_daddr(tsap);
+ DEBUG(2, __FUNCTION__ "(), daddr=%08x\n", self->daddr);
+ hashbin_insert(irlan, (QUEUE*) self, self->daddr, NULL);
+ } else {
+ /*
+ * If we already have the daddr set, this means that the
+ * client must already have started (peer mode). We must
+ * make sure that this connection attempt is from the same
+ * device as the client is dealing with!
+ */
+ ASSERT(self->daddr == irttp_get_daddr(tsap), return;);
+ }
+
+ /* Update saddr, since client may have moved to a new link */
+ self->saddr = irttp_get_saddr(tsap);
+ DEBUG(2, __FUNCTION__ "(), saddr=%08x\n", self->saddr);
+
+ /* Check if network device has been registered */
+ if (!self->netdev_registered)
+ irlan_register_netdev(self);
+
+ irlan_do_provider_event(self, IRLAN_CONNECT_INDICATION, NULL);
+
+ /*
+ * If we are in peer mode, the client may not have got the discovery
+ * indication it needs to make progress. If the client is still in
+ * IDLE state, we must kick it to
+	 * IDLE state, we must kick it to get it going.
+ if ((self->access_type == ACCESS_PEER) &&
+ (self->client.state == IRLAN_IDLE))
+ irlan_client_wakeup(self, self->saddr, self->daddr);
+}
+
+/*
+ * Function irlan_provider_connect_response (handle)
+ *
+ *    Accept incoming connection
+ *
+ */
+void irlan_provider_connect_response(struct irlan_cb *self,
+ struct tsap_cb *tsap)
+{
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ /* Just accept */
+ irttp_connect_response(tsap, IRLAN_MTU, NULL);
+
+ /* Check if network device has been registered */
+ if (!self->netdev_registered)
+ irlan_register_netdev(self);
+
+}
+
+void irlan_provider_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *userdata)
+{
+ struct irlan_cb *self;
+ struct tsap_cb *tsap;
+
+ DEBUG(4, __FUNCTION__ "(), reason=%d\n", reason);
+
+ self = (struct irlan_cb *) instance;
+ tsap = (struct tsap_cb *) sap;
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+ ASSERT(tsap != NULL, return;);
+ ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
+
+ ASSERT(tsap == self->provider.tsap_ctrl, return;);
+
+ irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
+}
+
+/*
+ * Function irlan_parse_open_data_cmd (self, skb)
+ *
+ *
+ *
+ */
+int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb);
+
+ return ret;
+}
+
+/*
+ * Function parse_command (skb)
+ *
+ * Extract all parameters from received buffer, then feed them to
+ *    irlan_check_command_param() for parsing
+ *
+ */
+int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
+ struct sk_buff *skb)
+{
+ __u8 *frame;
+ __u8 *ptr;
+ int count;
+ __u16 val_len;
+ int i;
+ char *name;
+ char *value;
+ int ret = RSP_SUCCESS;
+
+ ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;);
+
+ DEBUG(4, __FUNCTION__ "(), skb->len=%d\n", (int)skb->len);
+
+ ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);
+
+ if (!skb)
+ return -RSP_PROTOCOL_ERROR;
+
+ frame = skb->data;
+
+ name = kmalloc(255, GFP_ATOMIC);
+ if (!name)
+ return -RSP_INSUFFICIENT_RESOURCES;
+ value = kmalloc(1016, GFP_ATOMIC);
+ if (!value) {
+ kfree(name);
+ return -RSP_INSUFFICIENT_RESOURCES;
+ }
+
+ /* How many parameters? */
+ count = frame[1];
+
+ DEBUG(4, "Got %d parameters\n", count);
+
+ ptr = frame+2;
+
+ /* For all parameters */
+ for (i=0; i<count;i++) {
+ ret = irlan_extract_param(ptr, name, value, &val_len);
+ if (ret < 0) {
+ DEBUG(2, __FUNCTION__ "(), IrLAN, Error!\n");
+ break;
+ }
+ ptr+=ret;
+ ret = RSP_SUCCESS;
+ irlan_check_command_param(self, name, value);
+ }
+ /* Cleanup */
+ kfree(name);
+ kfree(value);
+
+ return ret;
+}
+
+/*
+ * Function irlan_provider_send_reply (self, info)
+ *
+ * Send reply to query to peer IrLAN layer
+ *
+ */
+void irlan_provider_send_reply(struct irlan_cb *self, int command,
+ int ret_code)
+{
+ struct sk_buff *skb;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == IRLAN_MAGIC, return;);
+
+ skb = dev_alloc_skb(128);
+ if (!skb)
+ return;
+
+ /* Reserve space for TTP, LMP, and LAP header */
+ skb_reserve(skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
+ skb_put(skb, 2);
+
+ switch (command) {
+ case CMD_GET_PROVIDER_INFO:
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x02; /* 2 parameters */
+ switch (self->media) {
+ case MEDIA_802_3:
+ irlan_insert_string_param(skb, "MEDIA", "802.3");
+ break;
+ case MEDIA_802_5:
+ irlan_insert_string_param(skb, "MEDIA", "802.5");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), unknown media type!\n");
+ break;
+ }
+ irlan_insert_short_param(skb, "IRLAN_VER", 0x0101);
+ break;
+ case CMD_GET_MEDIA_CHAR:
+ skb->data[0] = 0x00; /* Success */
+ skb->data[1] = 0x05; /* 5 parameters */
+ irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
+ irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
+ irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");
+
+ switch(self->access_type) {
+ case ACCESS_DIRECT:
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT");
+ break;
+ case ACCESS_PEER:
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "PEER");
+ break;
+ case ACCESS_HOSTED:
+ irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED");
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown access type\n");
+ break;
+ }
+ irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee);
+ break;
+ case CMD_OPEN_DATA_CHANNEL:
+ skb->data[0] = 0x00; /* Success */
+ if (self->provider.send_arb_val) {
+ skb->data[1] = 0x03; /* 3 parameters */
+ irlan_insert_short_param(skb, "CON_ARB",
+ self->provider.send_arb_val);
+ } else
+ skb->data[1] = 0x02; /* 2 parameters */
+ irlan_insert_byte_param(skb, "DATA_CHAN", self->stsap_sel_data);
+ irlan_insert_array_param(skb, "RECONNECT_KEY", "LINUX RULES!",
+ 12);
+ break;
+ case CMD_FILTER_OPERATION:
+ handle_filter_request(self, skb);
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown command!\n");
+ break;
+ }
+
+ irttp_data_request(self->provider.tsap_ctrl, skb);
+}
+
+/*
+ * Function irlan_provider_register(void)
+ *
+ *    Register provider support so we can accept incoming connections.
+ *
+ */
+int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
+{
+ struct notify_t notify;
+ struct tsap_cb *tsap;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ /* Check if already open */
+ if (self->provider.tsap_ctrl)
+ return -1;
+
+ /*
+ * First register well known control TSAP
+ */
+ irda_notify_init(&notify);
+ notify.data_indication = irlan_provider_data_indication;
+ notify.connect_indication = irlan_provider_connect_indication;
+ notify.disconnect_indication = irlan_provider_disconnect_indication;
+ notify.instance = self;
+ strncpy(notify.name, "IrLAN ctrl (p)", 16);
+
+ tsap = irttp_open_tsap(LSAP_ANY, 1, &notify);
+ if (!tsap) {
+ DEBUG(2, __FUNCTION__ "(), Got no tsap!\n");
+ return -1;
+ }
+ self->provider.tsap_ctrl = tsap;
+
+ /* Register with LM-IAS */
+ irlan_ias_register(self, tsap->stsap_sel);
+
+ return 0;
+}
+
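
As a companion to the encoder sketch earlier, here is a minimal sketch of walking a received control frame the way irlan_provider_parse_command() does: byte 0 carries the command (or response) code, byte 1 the parameter count, and the remainder is the list of length-prefixed parameters. walk_irlan_frame() is a hypothetical name, the bounds checks are illustrative, and this code is not part of the patch.

    /*
     * Illustrative only (not part of the patch).  Walks an IrLAN
     * control frame:
     *
     *   byte 0   command (or response) code
     *   byte 1   number of parameters
     *   ...      parameters, each: name length, name, 2-byte LE value
     *            length, value
     *
     * Returns 0 on success, -1 on a malformed frame.
     */
    static int walk_irlan_frame(const unsigned char *frame, int len)
    {
            const unsigned char *ptr, *end;
            int count, i;

            if (len < 2)
                    return -1;

            count = frame[1];               /* frame[0] is the command code */
            ptr = frame + 2;
            end = frame + len;

            for (i = 0; i < count; i++) {
                    unsigned int name_len, val_len;

                    if (end - ptr < 1)
                            return -1;
                    name_len = *ptr++;                      /* name length */
                    if ((unsigned int)(end - ptr) < name_len + 2)
                            return -1;
                    ptr += name_len;                        /* skip the name */
                    val_len = ptr[0] | (ptr[1] << 8);       /* LE value length */
                    ptr += 2;
                    if ((unsigned int)(end - ptr) < val_len)
                            return -1;
                    /* the value would be handed to a checking helper here */
                    ptr += val_len;
            }
            return 0;
    }
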
diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c
new file mode 100644
index 000000000..6bdf503f1
--- /dev/null
+++ b/net/irda/irlan/irlan_provider_event.c
@@ -0,0 +1,247 @@
+/*********************************************************************
+ *
+ * Filename: irlan_provider_event.c
+ * Version: 0.9
+ * Description: IrLAN provider state machine
+ * Status: Experimental.
+ * Author: Dag Brattli <dagb@cs.uit.no>
+ * Created at: Sun Aug 31 20:14:37 1997
+ * Modified at: Thu Apr 22 10:46:28 1999
+ * Modified by: Dag Brattli <dagb@cs.uit.no>
+ *
+ * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+#include <net/irda/irda.h>
+#include <net/irda/iriap.h>
+#include <net/irda/irlmp.h>
+#include <net/irda/irttp.h>
+
+#include <net/irda/irlan_provider.h>
+#include <net/irda/irlan_event.h>
+
+static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb);
+
+static int (*state[])(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb) =
+{
+ irlan_provider_state_idle,
+ NULL, /* Query */
+ NULL, /* Info */
+ irlan_provider_state_info,
+ NULL, /* Media */
+ irlan_provider_state_open,
+ NULL, /* Wait */
+ NULL, /* Arb */
+ irlan_provider_state_data,
+ NULL, /* Close */
+ NULL, /* Sync */
+};
+
+void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ ASSERT(*state[ self->provider.state] != NULL, return;);
+
+ (*state[self->provider.state]) (self, event, skb);
+}
+
+/*
+ * Function irlan_provider_state_idle (event, skb, info)
+ *
+ *    IDLE, we are waiting for a connect indication from a peer client.
+ */
+static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_CONNECT_INDICATION:
+ irlan_provider_connect_response( self, self->provider.tsap_ctrl);
+ irlan_next_provider_state( self, IRLAN_INFO);
+ break;
+ default:
+ DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_provider_state_info (self, event, skb, info)
+ *
+ *    INFO, we have accepted the connection and are awaiting commands
+ *    from the client (GetInfo, GetMedia, OpenData).
+ */
+static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_GET_INFO_CMD:
+ /* Be sure to use 802.3 in case of peer mode */
+ if (self->access_type == ACCESS_PEER) {
+ self->media = MEDIA_802_3;
+
+ /* Check if client has started yet */
+ if (self->client.state == IRLAN_IDLE) {
+ /* This should get the client going */
+ irlmp_discovery_request(8);
+ }
+ }
+
+ irlan_provider_send_reply(self, CMD_GET_PROVIDER_INFO,
+ RSP_SUCCESS);
+ /* Keep state */
+ break;
+ case IRLAN_GET_MEDIA_CMD:
+ irlan_provider_send_reply(self, CMD_GET_MEDIA_CHAR,
+ RSP_SUCCESS);
+ /* Keep state */
+ break;
+ case IRLAN_OPEN_DATA_CMD:
+ ret = irlan_parse_open_data_cmd(self, skb);
+ if (self->access_type == ACCESS_PEER) {
+ /* FIXME: make use of random functions! */
+ self->provider.send_arb_val = (jiffies & 0xffff);
+ }
+ irlan_provider_send_reply(self, CMD_OPEN_DATA_CHANNEL, ret);
+
+ if (ret == RSP_SUCCESS) {
+ irlan_next_provider_state(self, IRLAN_OPEN);
+
+ /* Signal client that we are now open */
+ irlan_do_client_event(self, IRLAN_PROVIDER_SIGNAL, NULL);
+ }
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_provider_state(self, IRLAN_IDLE);
+ break;
+ default:
+ DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_provider_state_open (self, event, skb, info)
+ *
+ *    OPEN, The client has issued an OpenData command and is awaiting a
+ * reply
+ *
+ */
+static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+
+ switch(event) {
+ case IRLAN_FILTER_CONFIG_CMD:
+ irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb);
+ irlan_provider_send_reply(self, CMD_FILTER_OPERATION,
+ RSP_SUCCESS);
+ /* Keep state */
+ break;
+ case IRLAN_DATA_CONNECT_INDICATION:
+ irlan_next_provider_state(self, IRLAN_DATA);
+ irlan_provider_connect_response(self, self->tsap_data);
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ irlan_next_provider_state(self, IRLAN_IDLE);
+ break;
+ default:
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+/*
+ * Function irlan_provider_state_data (self, event, skb, info)
+ *
+ * DATA, The data channel is connected, allowing data transfers between
+ * the local and remote machines.
+ *
+ */
+static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event,
+ struct sk_buff *skb)
+{
+ struct irmanager_event mgr_event;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == IRLAN_MAGIC, return -1;);
+
+ switch(event) {
+ case IRLAN_FILTER_CONFIG_CMD:
+ irlan_provider_parse_command(self, CMD_FILTER_OPERATION, skb);
+ irlan_provider_send_reply(self, CMD_FILTER_OPERATION,
+ RSP_SUCCESS);
+ break;
+ case IRLAN_LMP_DISCONNECT: /* FALLTHROUGH */
+ case IRLAN_LAP_DISCONNECT:
+ mgr_event.event = EVENT_IRLAN_STOP;
+ sprintf(mgr_event.devname, "%s", self->ifname);
+ irmanager_notify(&mgr_event);
+
+ irlan_next_provider_state(self, IRLAN_IDLE);
+ break;
+ default:
+ DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event);
+ break;
+ }
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/net/irda/irlan/irlan_srv.c b/net/irda/irlan/irlan_srv.c
deleted file mode 100644
index df4c6f954..000000000
--- a/net/irda/irlan/irlan_srv.c
+++ /dev/null
@@ -1,915 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_srv.c
- * Version: 0.1
- * Description: IrDA LAN Access Protocol Implementation
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Mon Dec 14 19:10:49 1998
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
- * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
- * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
- *
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-
-#include <asm/system.h>
-#include <asm/bitops.h>
-#include <asm/byteorder.h>
-
-
-#include <net/irda/irda.h>
-#include <net/irda/irttp.h>
-#include <net/irda/irlmp.h>
-#include <net/irda/irias_object.h>
-#include <net/irda/iriap.h>
-#include <net/irda/timer.h>
-
-#include <net/irda/irlan_common.h>
-#include <net/irda/irlan_eth.h>
-#include <net/irda/irlan_event.h>
-#include <net/irda/irlan_srv.h>
-
-/*
- * Private functions
- */
-static void __irlan_server_close( struct irlan_cb *self);
-static int irlan_server_dev_init( struct device *dev);
-static int irlan_server_dev_open(struct device *dev);
-static int irlan_server_dev_close(struct device *dev);
-static void irlan_check_param( struct irlan_cb *self, char *param, char *value);
-
-/*
- * Function irlan_server_init (dev)
- *
- * Allocates the master array. Called by modprobe().
- */
-__initfunc(int irlan_server_init( void))
-{
- DEBUG( 4, "--> irlan_server_init\n");
-
- /* Register with IrLMP as a service */
- irlmp_register_layer( S_LAN, SERVER, FALSE, NULL);
-
- irlan_server_register();
-
- DEBUG( 4, "irlan_server_init -->\n");
-
- return 0;
-}
-
-/*
- * Function irlan_server_cleanup (void)
- *
- * Removes all instances of the IrLAN network device driver, and the
- * master array. Called by rmmod().
- */
-void irlan_server_cleanup(void)
-{
- DEBUG( 4, "--> irlan_server_cleanup\n");
-
- irlmp_unregister_layer( S_LAN, SERVER);
-
- /*
- * Delete hashbin and close all irlan client instances in it
- */
- /* hashbin_delete( irlan, (FREE_FUNC) __irlan_server_close); */
-
- DEBUG( 4, "irlan_server_cleanup -->\n");
-}
-
-/*
- * Function irlan_server_open (void)
- *
- * This function allocates and opens a new instance of the IrLAN network
- * device driver.
- */
-struct irlan_cb *irlan_server_open(void)
-{
- struct irlan_cb *self;
- int result;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- /*
- * Initialize the irlan_server structure.
- */
-
- self = kmalloc( sizeof(struct irlan_cb), GFP_ATOMIC);
- if ( self == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Unable to kmalloc!\n");
- return NULL;
- }
- memset( self, 0, sizeof(struct irlan_cb));
-
- /*
- * Initialize local device structure
- */
- self->magic = IRLAN_MAGIC;
-
- self->dev.name = self->ifname;
- self->dev.init = irlan_server_dev_init;
- self->dev.priv = (void *) self;
- self->dev.next = NULL;
-
- if (( result = register_netdev( &self->dev)) != 0) {
- DEBUG( 0, __FUNCTION__ "(), register_netdev() failed!\n");
- return NULL;
- }
-
- irlan_next_state( self, IRLAN_IDLE);
-
- hashbin_insert( irlan , (QUEUE *) self, (int) self, NULL);
-
- return self;
-}
-
-/*
- * Function irlan_server_dev_init (dev)
- *
- * The network device initialization function. Called only once.
- *
- */
-static int irlan_server_dev_init( struct device *dev)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( dev != NULL, return -1;);
-
- irlan_eth_init( dev);
-
- /* Overrride some functions */
- dev->open = irlan_server_dev_open;
- dev->stop = irlan_server_dev_close;
-
- /*
- * OK, since we are emulating an IrLAN sever we will have to give
- * ourself an ethernet address!
- * FIXME: this must be more dynamically
- */
- dev->dev_addr[0] = 0x40;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x00;
- dev->dev_addr[3] = 0x00;
- dev->dev_addr[4] = 0x23;
- dev->dev_addr[5] = 0x45;
-
- return 0;
-}
-
-/*
- * Function irlan_server_dev_open (dev)
- *
- * Start the Servers ether network device, this function will be called by
- * "ifconfig server0 up".
- */
-static int irlan_server_dev_open( struct device *dev)
-{
- /* struct irlan_cb *self = (struct irlan_cb *) dev->priv; */
-
- DEBUG( 4, "irlan_server_dev_open()\n");
-
- ASSERT( dev != NULL, return -1;);
-
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
-
- MOD_INC_USE_COUNT;
-
- return 0;
-}
-
-static void __irlan_server_close( struct irlan_cb *self)
-{
- DEBUG( 4, "--> irlan_server_close()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- /*
- * Disconnect open TSAP connections
- */
- if ( self->tsap_data) {
- irttp_disconnect_request( self->tsap_data, NULL, P_HIGH);
-
- /* FIXME: this will close the tsap before the disconenct
- * frame has been sent
- */
- /* irttp_close_tsap( self->tsap_data); */
- }
- if ( self->tsap_ctrl) {
- irttp_disconnect_request( self->tsap_ctrl, NULL, P_HIGH);
-
- /* irttp_close_tsap( self->tsap_control); */
- }
-
- unregister_netdev( &self->dev);
-
- self->magic = ~IRLAN_MAGIC;
-
- kfree( self);
-
- DEBUG( 4, "irlan_server_close() -->\n");
-}
-
-/*
- * Function irlan_server_close (self)
- *
- * This function closes and marks the IrLAN instance as not in use.
- */
-void irlan_server_close( struct irlan_cb *self)
-{
- struct irlan_cb *entry;
-
- DEBUG( 4, "--> irlan_server_close()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- entry = hashbin_remove( irlan, (int) self, NULL);
-
- ASSERT( entry == self, return;);
-
- __irlan_server_close( self);
-
- DEBUG( 4, "irlan_server_close() -->\n");
-}
-
-/*
- * Function irlan_server_dev_close (dev)
- *
- * Stop the IrLAN ether network device, his function will be called by
- * ifconfig down.
- */
-static int irlan_server_dev_close( struct device *dev)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( dev != NULL, return -1;);
-
- dev->tbusy = 1;
- dev->start = 0;
-
- MOD_DEC_USE_COUNT;
-
- return 0;
-}
-
-
-/*
- * Function irlan_server_disconnect_indication (handle, reason, priv)
- *
- * Callback function for the IrTTP layer. Indicates a disconnection of
- * the specified connection (handle)
- *
- */
-void irlan_server_disconnect_indication( void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *skb)
-{
- struct irlan_info info;
- struct irlan_cb *self;
- struct tsap_cb *tsap;
-
- DEBUG( 4, __FUNCTION__ "(), Reason=%d\n", reason);
-
- self = ( struct irlan_cb *) instance;
- tsap = ( struct tsap_cb *) sap;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- info.daddr = self->daddr;
-
- if ( tsap == self->tsap_data) {
- DEBUG( 0, "IrLAN, data channel disconnected by peer!\n");
- self->connected = FALSE;
- } else if ( tsap == self->tsap_ctrl) {
- DEBUG( 0, "IrLAN, control channel disconnected by peer!\n");
- } else {
- DEBUG( 0, "Error, disconnect on unknown handle!\n");
- }
-
- /* Stop IP from transmitting more packets */
- /* irlan_flow_indication( handle, FLOW_STOP, priv); */
-
- irlan_do_server_event( self, IRLAN_LMP_DISCONNECT, NULL, NULL);
-}
-
-/*
- * Function irlan_server_control_data_indication (handle, skb)
- *
- * This function gets the data that is received on the control channel
- *
- */
-void irlan_server_control_data_indication( void *instance, void *sap,
- struct sk_buff *skb)
-{
- struct irlan_cb *self;
- __u8 code;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct irlan_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- ASSERT( skb != NULL, return;);
-
- code = skb->data[0];
- switch( code) {
- case CMD_GET_PROVIDER_INFO:
- DEBUG( 4, "Got GET_PROVIDER_INFO command!\n");
- irlan_do_server_event( self, IRLAN_GET_INFO_CMD, skb, NULL);
- break;
-
- case CMD_GET_MEDIA_CHAR:
- DEBUG( 4, "Got GET_MEDIA_CHAR command!\n");
- irlan_do_server_event( self, IRLAN_GET_MEDIA_CMD, skb, NULL);
- break;
- case CMD_OPEN_DATA_CHANNEL:
- DEBUG( 4, "Got OPEN_DATA_CHANNEL command!\n");
- irlan_do_server_event( self, IRLAN_OPEN_DATA_CMD, skb, NULL);
- break;
- case CMD_FILTER_OPERATION:
- DEBUG( 4, "Got FILTER_OPERATION command!\n");
- irlan_do_server_event( self, IRLAN_FILTER_CONFIG_CMD, skb,
- NULL);
- break;
- case CMD_RECONNECT_DATA_CHAN:
- DEBUG( 0, __FUNCTION__"(), Got RECONNECT_DATA_CHAN command\n");
- DEBUG( 0, __FUNCTION__"(), NOT IMPLEMENTED\n");
- break;
- case CMD_CLOSE_DATA_CHAN:
- DEBUG( 0, "Got CLOSE_DATA_CHAN command!\n");
- DEBUG( 0, __FUNCTION__"(), NOT IMPLEMENTED\n");
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown command!\n");
- break;
- }
-}
-
-/*
- * Function irlan_server_connect_indication (handle, skb, priv)
- *
- * Got connection from peer IrLAN layer
- *
- */
-void irlan_server_connect_indication( void *instance, void *sap,
- struct qos_info *qos, int max_sdu_size,
- struct sk_buff *skb)
-{
- struct irlan_cb *self;
- struct irlan_info info;
- struct tsap_cb *tsap;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct irlan_cb *) instance;
- tsap = ( struct tsap_cb *) sap;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- info.tsap = tsap;
-
- if ( tsap == self->tsap_data)
- irlan_do_server_event( self, IRLAN_DATA_CONNECT_INDICATION,
- NULL, &info);
- else
- irlan_do_server_event( self, IRLAN_CONNECT_INDICATION, NULL,
- &info);
-}
-
-/*
- * Function irlan_server_connect_response (handle)
- *
- * Accept incomming connection
- *
- */
-void irlan_server_connect_response( struct irlan_cb *self,
- struct tsap_cb *tsap)
-{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- /* FIXME: define this value */
- irttp_connect_response( tsap, 1518, NULL);
-}
-
-/*
- * Function irlan_server_get_provider_info (self)
- *
- * Send Get Provider Information command to peer IrLAN layer
- *
- */
-void irlan_server_get_provider_info( struct irlan_cb *self)
-{
- struct sk_buff *skb;
- __u8 *frame;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0,"irlan_server_get_provider_info: "
- "Could not allocate an sk_buff of length %d\n", 64);
- return;
- }
-
- /* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
-
- frame = skb->data;
-
- frame[0] = CMD_GET_PROVIDER_INFO;
- frame[1] = 0x00; /* Zero parameters */
-
- irttp_data_request( self->tsap_ctrl, skb);
-}
-
-/*
- * Function irlan_parse_open_data_cmd (self, skb)
- *
- *
- *
- */
-int irlan_parse_open_data_cmd( struct irlan_cb *self, struct sk_buff *skb)
-{
- int ret = RSP_SUCCESS;
-
- irlan_server_extract_params( self, CMD_OPEN_DATA_CHANNEL, skb);
-
- return ret;
-}
-
-/*
- * Function extract_params (skb)
- *
- * Extract all parameters from received buffer, then feed them to
- * check_params for parsing
- *
- */
-int irlan_server_extract_params( struct irlan_cb *self, int cmd,
- struct sk_buff *skb)
-{
- __u8 *frame;
- __u8 *ptr;
- int count;
- __u8 name_len;
- __u16 val_len;
- int i;
-
- ASSERT( skb != NULL, return -RSP_PROTOCOL_ERROR;);
-
- DEBUG( 4, __FUNCTION__ "(), skb->len=%d\n", (int)skb->len);
-
- ASSERT( self != NULL, return -RSP_PROTOCOL_ERROR;);
- ASSERT( self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);
-
- if ( skb == NULL) {
- DEBUG( 0, "extract_params: Got NULL skb!\n");
- return -RSP_PROTOCOL_ERROR;
- }
- frame = skb->data;
-
- /* How many parameters? */
- count = frame[1];
-
- DEBUG( 4, "Got %d parameters\n", count);
-
- ptr = frame+2;
-
- /* For all parameters */
- for ( i=0; i<count;i++) {
- /* get length of parameter name ( 1 byte) */
- name_len = *ptr++;
-
- if (name_len > 255) {
- DEBUG( 0, "extract_params, name_len > 255\n");
- return -RSP_PROTOCOL_ERROR;
- }
-
- /* get parameter name */
- memcpy( self->name, ptr, name_len);
- self->name[ name_len] = '\0';
- ptr+=name_len;
-
- /*
- * Get length of parameter value ( 2 bytes in little endian
- * format)
- */
- val_len = *ptr++ & 0xff;
- val_len |= *ptr++ << 8;
-
- if (val_len > 1016) {
- DEBUG( 0,
-			       "extract_params, parameter value too long\n");
- return -RSP_PROTOCOL_ERROR;
- }
-
- /* get parameter value */
- memcpy( self->value, ptr, val_len);
- self->value[ val_len] = '\0';
- ptr+=val_len;
-
- DEBUG( 4, "Parameter: %s ", self->name);
- DEBUG( 4, "Value: %s\n", self->value);
-
- irlan_check_param( self, self->name, self->value);
- }
- return RSP_SUCCESS;
-}
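
For reference, the control frame parsed by the loop above is laid out as a
one-byte command/response code, a one-byte parameter count, and then one
{name length, name, 16-bit little-endian value length, value} tuple per
parameter. The standalone decoder below is only an illustrative sketch of
that tuple format; the helper name, buffer sizes and sample frame are made
up for the example and are not part of these sources.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode one IrLAN parameter tuple:
 *   1 byte  name length
 *   n bytes name
 *   2 bytes value length (little endian)
 *   m bytes value
 * Returns the number of bytes consumed, or -1 on a malformed buffer.
 * Caller provides name[256] and value[1024] (value length is limited
 * to 1016 bytes, as in the driver).
 */
static int irlan_decode_param(const uint8_t *buf, int len,
                              char *name, char *value)
{
    int name_len, val_len, used = 0;

    if (len < 1)
        return -1;
    name_len = buf[used++];
    if (used + name_len + 2 > len)
        return -1;
    memcpy(name, buf + used, name_len);
    name[name_len] = '\0';
    used += name_len;

    /* Two-byte little endian value length, as in the loop above */
    val_len = buf[used] | (buf[used + 1] << 8);
    used += 2;
    if (val_len > 1016 || used + val_len > len)
        return -1;
    memcpy(value, buf + used, val_len);
    value[val_len] = '\0';
    used += val_len;

    return used;
}

int main(void)
{
    /* "MEDIA" = "802.3", encoded the way the provider would send it */
    const uint8_t frame[] = { 5, 'M','E','D','I','A', 5, 0,
                              '8','0','2','.','3' };
    char name[256], value[1024];

    if (irlan_decode_param(frame, sizeof(frame), name, value) > 0)
        printf("%s = %s\n", name, value);
    return 0;
}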
-
-/*
- * Function handle_filter_request (self, skb)
- *
- * Handle filter request from client peer device
- *
- */
-void handle_filter_request( struct irlan_cb *self, struct sk_buff *skb)
-{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- if (( self->t.server.filter_type == IR_DIRECTED) &&
- ( self->t.server.filter_operation == DYNAMIC))
- {
- DEBUG( 0, "Giving peer a dynamic Ethernet address\n");
-
- self->t.server.mac_address[0] = 0x40;
- self->t.server.mac_address[1] = 0x00;
- self->t.server.mac_address[2] = 0x00;
- self->t.server.mac_address[3] = 0x00;
- self->t.server.mac_address[4] = 0x12;
- self->t.server.mac_address[5] = 0x34;
-
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x03;
- insert_param( skb, "FILTER_MODE", 1, "NONE", 0, 0);
- insert_param( skb, "MAX_ENTRY", 3, NULL, 0, 0x0001);
- insert_array_param( skb, "FILTER_ENTRY", self->t.server.mac_address, 6);
- return;
- }
-
- if (( self->t.server.filter_type == IR_DIRECTED) &&
- ( self->t.server.filter_mode == FILTER))
- {
- DEBUG( 0, "Directed filter on\n");
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x00;
- return;
- }
- if (( self->t.server.filter_type == IR_DIRECTED) &&
- ( self->t.server.filter_mode == NONE))
- {
- DEBUG( 0, "Directed filter off\n");
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x00;
- return;
- }
-
- if (( self->t.server.filter_type == IR_BROADCAST) &&
- ( self->t.server.filter_mode == FILTER))
- {
- DEBUG( 0, "Broadcast filter on\n");
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x00;
- return;
- }
- if (( self->t.server.filter_type == IR_BROADCAST) &&
- ( self->t.server.filter_mode == NONE))
- {
- DEBUG( 0, "Broadcast filter off\n");
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x00;
- return;
- }
- if (( self->t.server.filter_type == IR_MULTICAST) &&
- ( self->t.server.filter_mode == FILTER))
- {
- DEBUG( 0, "Multicast filter on\n");
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x00;
- return;
- }
- if (( self->t.server.filter_type == IR_MULTICAST) &&
- ( self->t.server.filter_mode == NONE))
- {
- DEBUG( 0, "Multicast filter off\n");
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x00;
- return;
- }
- if (( self->t.server.filter_type == IR_MULTICAST) &&
- ( self->t.server.filter_operation == GET))
- {
- DEBUG( 0, "Multicast filter get\n");
- skb->data[0] = 0x00; /* Success? */
- skb->data[1] = 0x02;
- insert_param( skb, "FILTER_MODE", 1, "NONE", 0, 0);
- insert_param( skb, "MAX_ENTRY", 3, NULL, 0, 16);
- return;
- }
- skb->data[0] = 0x00; /* Command not supported */
- skb->data[1] = 0x00;
-
- DEBUG( 0, "Not implemented!\n");
-}
-
-/*
- * Function irlan_check_param (self, param, value)
- *
- * Check parameters in request from peer device
- *
- */
-static void irlan_check_param( struct irlan_cb *self, char *param, char *value)
-{
- __u8 *bytes;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- bytes = value;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- DEBUG( 4, "%s, %s\n", param, value);
-
- /*
- * This is experimental!! DB.
- */
- if ( strcmp( param, "MODE") == 0) {
- DEBUG( 0, __FUNCTION__ "()\n");
- self->use_udata = TRUE;
- return;
- }
-
- /*
- * FILTER_TYPE
- */
- if ( strcmp( param, "FILTER_TYPE") == 0) {
- if ( strcmp( value, "DIRECTED") == 0) {
- self->t.server.filter_type = IR_DIRECTED;
- return;
- }
- if ( strcmp( value, "MULTICAST") == 0) {
- self->t.server.filter_type = IR_MULTICAST;
- return;
- }
- if ( strcmp( value, "BROADCAST") == 0) {
- self->t.server.filter_type = IR_BROADCAST;
- return;
- }
- }
- /*
- * FILTER_MODE
- */
- if ( strcmp( param, "FILTER_MODE") == 0) {
- if ( strcmp( value, "ALL") == 0) {
- self->t.server.filter_mode = ALL;
- return;
- }
- if ( strcmp( value, "FILTER") == 0) {
- self->t.server.filter_mode = FILTER;
- return;
- }
- if ( strcmp( value, "NONE") == 0) {
-			self->t.server.filter_mode = NONE;
- return;
- }
- }
- /*
- * FILTER_OPERATION
- */
- if ( strcmp( param, "FILTER_OPERATION") == 0) {
- if ( strcmp( value, "DYNAMIC") == 0) {
- self->t.server.filter_operation = DYNAMIC;
- return;
- }
- if ( strcmp( value, "GET") == 0) {
- self->t.server.filter_operation = GET;
- return;
- }
- }
-}
-
-/*
- * Function irlan_server_send_reply (self, command, ret_code)
- *
- * Send reply to query to peer IrLAN layer
- *
- */
-void irlan_server_send_reply( struct irlan_cb *self, int command, int ret_code)
-{
- struct sk_buff *skb;
-
- DEBUG( 4, "irlan_server_send_reply()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLAN_MAGIC, return;);
-
- skb = dev_alloc_skb( 128);
- if (skb == NULL) {
- DEBUG( 0,"irlan_server_send_reply: "
- "Could not allocate an sk_buff of length %d\n", 128);
- return;
- }
-
- /* Reserve space for TTP, LMP, and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_HEADER+LAP_HEADER);
- skb_put( skb, 2);
-
- switch ( command) {
- case CMD_GET_PROVIDER_INFO:
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x02; /* 2 parameters */
- insert_param( skb, "MEDIA", 1, "802.3", 0, 0);
- insert_param( skb, "IRLAN_VER", 3, NULL, 0, 0x0101);
- break;
- case CMD_GET_MEDIA_CHAR:
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x05; /* 5 parameters */
- insert_param( skb, "FILTER_TYPE", 1, "DIRECTED", 0, 0);
- insert_param( skb, "FILTER_TYPE", 1, "BROADCAST", 0, 0);
- insert_param( skb, "FILTER_TYPE", 1, "MULTICAST", 0, 0);
- insert_param( skb, "ACCESS_TYPE", 1, "DIRECTED", 0, 0);
- insert_param( skb, "MAX_FRAME", 3, NULL, 0, 0x05ee);
- break;
- case CMD_OPEN_DATA_CHANNEL:
- skb->data[0] = 0x00; /* Success */
- skb->data[1] = 0x02; /* 2 parameters */
- insert_param( skb, "DATA_CHAN", 2, NULL, self->stsap_sel_data, 0);
- insert_param( skb, "RECONNECT_KEY", 1, "LINUX RULES!", 0, 0);
- break;
- case CMD_FILTER_OPERATION:
- handle_filter_request( self, skb);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown command!\n");
- break;
- }
-
- irttp_data_request( self->tsap_ctrl, skb);
-}
-
-/*
- * Function irlan_server_register(void)
- *
- *    Register server support so we can accept incoming connections. We
- * must register both a TSAP for control and data
- *
- */
-void irlan_server_register(void)
-{
- struct notify_t notify;
- struct irlan_cb *self;
- struct ias_object *obj;
- struct tsap_cb *tsap;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- /*
- * Open irlan_server instance
- */
- self = irlan_server_open();
- if ( !self || self->magic != IRLAN_MAGIC) {
- DEBUG( 0, __FUNCTION__"(), Unable to open server!\n");
- return;
- }
- /*
- * First register well known control TSAP
- */
- irda_notify_init( &notify);
- notify.data_indication = irlan_server_control_data_indication;
- notify.connect_indication = irlan_server_connect_indication;
- notify.disconnect_indication = irlan_server_disconnect_indication;
- notify.instance = self;
- strncpy( notify.name, "IrLAN srv. ctrl", 16);
-
- /* FIXME: should not use a static value here! */
- tsap = irttp_open_tsap( TSAP_IRLAN, 1, &notify);
- if ( tsap == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Got no handle!!\n");
- return;
- }
- self->tsap_ctrl = tsap;
-
- /*
- * Now register data TSAP
- */
- irda_notify_init( &notify);
- notify.data_indication = irlan_eth_rx;
- notify.udata_indication = irlan_eth_rx;
- notify.connect_indication = irlan_server_connect_indication;
- notify.disconnect_indication = irlan_server_disconnect_indication;
- notify.instance = self;
- strncpy( notify.name, "IrLAN srv. data", 16);
-
- /*
- * Register well known address with IrTTP
- */
- tsap = irttp_open_tsap( LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
- if ( tsap == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Got no handle!\n");
- return;
- }
- self->tsap_data = tsap;
-
- /*
- * This is the data TSAP selector which we will pass to the client
- *    when the client asks for it.
- */
- self->stsap_sel_data = tsap->stsap_sel;
- ASSERT( self->stsap_sel_data > 0, return;);
-
- DEBUG( 0, "irlan_server_register(), Using Source TSAP selector=%02x\n",
- self->stsap_sel_data);
-
- /*
- * Register with LM-IAS
- */
- obj = irias_new_object( "IrLAN", IAS_IRLAN_ID);
- irias_add_integer_attrib( obj, "IrDA:TinyTP:LsapSel", TSAP_IRLAN);
- irias_insert_object( obj);
-
- obj = irias_new_object( "PnP", IAS_PNP_ID);
- irias_add_string_attrib( obj, "Name", "Linux");
- irias_add_string_attrib( obj, "DeviceID", "HWP19F0");
- irias_add_integer_attrib( obj, "CompCnt", 2);
- irias_add_string_attrib( obj, "Comp#01", "PNP8294");
- irias_add_string_attrib( obj, "Comp#01", "PNP8389");
- irias_add_string_attrib( obj, "Manufacturer", "Linux/IR Project");
- irias_insert_object( obj);
-}
-
-#ifdef MODULE
-
-MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
-MODULE_DESCRIPTION("The Linux IrDA LAN Server protocol");
-
-/*
- * Function init_module (void)
- *
- * Initialize the IrLAN module, this function is called by the
- * modprobe(1) program.
- */
-int init_module(void)
-{
-/* int result; */
-
- DEBUG( 4, "--> IrLAN irlan_server: init_module\n");
-
- irlan_server_init();
-
- DEBUG( 4, "IrLAN irlan_server: init_module -->\n");
-
- return 0;
-}
-
-/*
- * Function cleanup_module (void)
- *
- * Remove the IrLAN module, this function is called by the rmmod(1)
- * program
- */
-void cleanup_module(void)
-{
- DEBUG( 4, "--> irlan_server, cleanup_module\n");
- /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
-
- /* Free some memory */
- irlan_server_cleanup();
-
- DEBUG( 4, "irlan_server, cleanup_module -->\n");
-}
-
-#endif /* MODULE */
diff --git a/net/irda/irlan/irlan_srv_event.c b/net/irda/irlan/irlan_srv_event.c
deleted file mode 100644
index 6ad8d2f46..000000000
--- a/net/irda/irlan/irlan_srv_event.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*********************************************************************
- *
- * Filename: irlan_srv_event.c
- * Version: 0.1
- * Description: IrLAN Server FSM (Finite State Machine)
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Sun Aug 31 20:14:37 1997
- * Modified at: Wed Dec 9 02:39:05 1998
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#include <net/irda/irda.h>
-#include <net/irda/iriap.h>
-#include <net/irda/irlmp.h>
-#include <net/irda/irttp.h>
-
-#include <net/irda/irlan_srv.h>
-#include <net/irda/irlan_event.h>
-
-static int irlan_server_state_idle ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info);
-static int irlan_server_state_info ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info);
-static int irlan_server_state_open ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info);
-static int irlan_server_state_data ( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info);
-
-static int (*state[])( struct irlan_cb *self, IRLAN_EVENT event,
- struct sk_buff *skb, struct irlan_info *info) =
-{
- irlan_server_state_idle,
- NULL, /* Query */
- NULL, /* Info */
- irlan_server_state_info,
- NULL, /* Media */
- irlan_server_state_open,
- NULL, /* Wait */
- NULL, /* Arb */
- irlan_server_state_data,
- NULL, /* Close */
- NULL, /* Sync */
-};
-
-void irlan_do_server_event( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info)
-{
- (*state[ self->state]) ( self, event, skb, info);
-}
-
-/*
- * Function irlan_server_state_idle (self, event, skb, info)
- *
- *    IDLE, we are waiting for a connection indication from a client.
- */
-static int irlan_server_state_idle( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_CONNECT_INDICATION:
- ASSERT( info != NULL, return 0;);
- irlan_server_connect_response( self, info->tsap);
- irlan_next_state( self, IRLAN_INFO);
- break;
- default:
- DEBUG( 4, __FUNCTION__ "(), Unknown event %d\n",
- event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
- return 0;
-}
-
-/*
- * Function irlan_server_state_info (self, event, skb, info)
- *
- *    INFO, a client has connected and is querying provider information.
- */
-static int irlan_server_state_info( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info)
-{
- int ret;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_GET_INFO_CMD:
- irlan_server_send_reply( self, CMD_GET_PROVIDER_INFO,
- RSP_SUCCESS);
- /* Keep state */
- break;
-
- case IRLAN_GET_MEDIA_CMD:
- irlan_server_send_reply( self, CMD_GET_MEDIA_CHAR,
- RSP_SUCCESS);
- /* Keep state */
- break;
-
- case IRLAN_OPEN_DATA_CMD:
- ret = irlan_parse_open_data_cmd( self, skb);
- irlan_server_send_reply( self, CMD_OPEN_DATA_CHANNEL, ret);
-
- if ( ret == RSP_SUCCESS)
- irlan_next_state( self, IRLAN_OPEN);
- break;
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, "irlan_server_state_info, Unknown event %d\n",
- event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_server_state_open (self, event, skb, info)
- *
- *    OPEN, The client has issued an OpenData command and is awaiting a
- * reply
- *
- */
-static int irlan_server_state_open( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info)
-{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
-
- switch( event) {
- case IRLAN_FILTER_CONFIG_CMD:
- irlan_server_extract_params( self, CMD_FILTER_OPERATION, skb);
- irlan_server_send_reply( self, CMD_FILTER_OPERATION,
- RSP_SUCCESS);
- /* Keep state */
- break;
-
- case IRLAN_DATA_CONNECT_INDICATION:
- DEBUG( 4, "DATA_CONNECT_INDICATION\n");
- irlan_next_state( self, IRLAN_DATA);
- irlan_server_connect_response( self, info->tsap);
- break;
-
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n",
- event);
- break;
- }
-
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-/*
- * Function irlan_server_state_data (self, event, skb, info)
- *
- * DATA, The data channel is connected, allowing data transfers between
- * the local and remote machines.
- *
- */
-static int irlan_server_state_data( struct irlan_cb *self,
- IRLAN_EVENT event,
- struct sk_buff *skb,
- struct irlan_info *info)
-{
- struct irmanager_event mgr_event;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRLAN_MAGIC, return -1;);
-
- switch( event) {
- case IRLAN_FILTER_CONFIG_CMD:
- irlan_server_extract_params( self, CMD_FILTER_OPERATION, skb);
- irlan_server_send_reply( self, CMD_FILTER_OPERATION,
- RSP_SUCCESS);
-
- /* Make sure the code below only runs once */
- if ( !self->connected) {
- mgr_event.event = EVENT_IRLAN_START;
- sprintf( mgr_event.devname, "%s", self->ifname);
- irmanager_notify( &mgr_event);
-
- self->connected = TRUE;
- }
- break;
-
- case IRLAN_LMP_DISCONNECT:
- case IRLAN_LAP_DISCONNECT:
- mgr_event.event = EVENT_IRLAN_STOP;
- sprintf( mgr_event.devname, "%s", self->ifname);
- irmanager_notify( &mgr_event);
-
- irlan_next_state( self, IRLAN_IDLE);
- break;
- default:
- DEBUG( 0, "irlan_server_state_data, Unknown event %d\n",
- event);
- break;
- }
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- return 0;
-}
-
-
-
-
-
-
-
-
-
-
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 633d29220..d24923652 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irlap.c
- * Version: 0.8
+ * Version: 0.9
* Description: An IrDA LAP driver for Linux
- * Status: Experimental.
+ * Status: Stable.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sat Jan 16 22:19:27 1999
+ * Modified at: Fri Apr 23 10:12:29 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/random.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
@@ -43,12 +44,23 @@
#include <net/irda/irlap_comp.h>
hashbin_t *irlap = NULL;
+int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ;
-static void __irlap_close( struct irlap_cb *self);
+static void __irlap_close(struct irlap_cb *self);
+
+static char *lap_reasons[] = {
+ "ERROR, NOT USED",
+ "LAP_DISC_INDICATION",
+ "LAP_NO_RESPONSE",
+ "LAP_RESET_INDICATION",
+ "LAP_FOUND_NONE",
+ "LAP_MEDIA_BUSY",
+ "LAP_PRIMARY_CONFLICT",
+ "ERROR, NOT USED",
+};
#ifdef CONFIG_PROC_FS
-int irlap_proc_read( char *buf, char **start, off_t offset, int len,
- int unused);
+int irlap_proc_read( char *, char **, off_t, int, int);
#endif /* CONFIG_PROC_FS */
@@ -116,11 +128,17 @@ struct irlap_cb *irlap_open( struct irda_device *irdev)
skb_queue_head_init( &self->tx_list);
skb_queue_head_init( &self->wx_list);
- /* My unique IrLAP device address! :-) */
- self->saddr = jiffies;
+ /* My unique IrLAP device address! */
+ get_random_bytes(&self->saddr, sizeof(self->saddr));
- /* Generate random connection address for this session */
- self->caddr = jiffies & 0xfe;
+ /*
+ * Generate random connection address for this session, which must
+ * be 7 bits wide and different from 0x00 and 0xfe
+ */
+ while ((self->caddr == 0x00) || (self->caddr == 0xfe)) {
+ get_random_bytes(&self->caddr, sizeof(self->caddr));
+ self->caddr &= 0xfe;
+ }
init_timer( &self->slot_timer);
init_timer( &self->query_timer);
@@ -136,10 +154,8 @@ struct irlap_cb *irlap_open( struct irda_device *irdev)
hashbin_insert( irlap, (QUEUE *) self, self->saddr, NULL);
- irlmp_register_irlap( self, self->saddr, &self->notify);
+ irlmp_register_link( self, self->saddr, &self->notify);
- DEBUG( 4, "irlap_open -->\n");
-
return self;
}
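
The loop above keeps drawing random bytes until the connection address is
usable: the address lives in the upper seven bits of the byte (the mask
with 0xfe clears the low bit) and the values 0x00 and 0xfe are reserved,
so they are rejected. A small userspace sketch of the same selection, with
rand() standing in for get_random_bytes(); the helper name is invented and
this is only an illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Pick a 7-bit IrLAP connection address (stored in bits 7..1, low bit
 * clear), avoiding the reserved values 0x00 and 0xfe.
 */
static uint8_t pick_connection_address(void)
{
    uint8_t caddr = 0x00;

    while (caddr == 0x00 || caddr == 0xfe)
        caddr = (uint8_t)rand() & 0xfe;

    return caddr;
}

int main(void)
{
    srand((unsigned)time(NULL));
    printf("connection address = 0x%02x\n", pick_connection_address());
    return 0;
}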
@@ -166,15 +182,15 @@ static void __irlap_close( struct irlap_cb *self)
irlap_flush_all_queues( self);
self->irdev = NULL;
- self->magic = ~LAP_MAGIC;
+ self->magic = 0;
kfree( self);
}
/*
- * Function irlap_close ()
+ * Function irlap_close (self)
*
- *
+ * Remove IrLAP instance
*
*/
void irlap_close( struct irlap_cb *self)
@@ -188,127 +204,118 @@ void irlap_close( struct irlap_cb *self)
irlap_disconnect_indication( self, LAP_DISC_INDICATION);
- irlmp_unregister_irlap( self->saddr);
+ irlmp_unregister_link(self->saddr);
self->notify.instance = NULL;
/* Be sure that we manage to remove ourself from the hash */
lap = hashbin_remove( irlap, self->saddr, NULL);
if ( !lap) {
- DEBUG( 0, __FUNCTION__ "(), Didn't find myself!\n");
+ DEBUG( 1, __FUNCTION__ "(), Didn't find myself!\n");
return;
}
__irlap_close( lap);
}
/*
- * Function irlap_connect_indication ()
+ * Function irlap_connect_indication (self, skb)
*
* Another device is attempting to make a connection
*
*/
-void irlap_connect_indication( struct irlap_cb *self, struct sk_buff *skb)
+void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_init_qos_capabilities( self, NULL); /* No user QoS! */
+ irlap_init_qos_capabilities(self, NULL); /* No user QoS! */
- irlmp_link_connect_indication( self->notify.instance, &self->qos_tx,
- skb);
+ irlmp_link_connect_indication(self->notify.instance, self->saddr,
+ self->daddr, &self->qos_tx, skb);
}
/*
- * Function irlap_connect_response (void)
+ * Function irlap_connect_response (self, skb)
*
 *	  Service user has accepted the incoming connection
*
*/
-void irlap_connect_response( struct irlap_cb *self, struct sk_buff *skb)
+void irlap_connect_response(struct irlap_cb *self, struct sk_buff *skb)
{
DEBUG( 4, __FUNCTION__ "()\n");
- irlap_do_event( self, CONNECT_RESPONSE, skb, NULL);
+ irlap_do_event(self, CONNECT_RESPONSE, skb, NULL);
}
/*
- * Function irlap_connect_request (daddr, qos, sniff)
+ * Function irlap_connect_request (self, daddr, qos_user, sniff)
*
 * Request connection with another device; sniffing is not implemented
 * yet.
+ *
*/
-void irlap_connect_request( struct irlap_cb *self, __u32 daddr,
- struct qos_info *qos_user, int sniff)
+void irlap_connect_request(struct irlap_cb *self, __u32 daddr,
+ struct qos_info *qos_user, int sniff)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(3, __FUNCTION__ "(), daddr=0x%08x\n", daddr);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
self->daddr = daddr;
-
+
/*
* If the service user specifies QoS values for this connection,
* then use them
*/
- irlap_init_qos_capabilities( self, qos_user);
+ irlap_init_qos_capabilities(self, qos_user);
- if ( self->state == LAP_NDM) {
- irlap_do_event( self, CONNECT_REQUEST, NULL, NULL);
- } else {
- DEBUG( 0, __FUNCTION__ "() Wrong state!\n");
-
- irlap_disconnect_indication( self, LAP_MEDIA_BUSY);
- }
-
+ if ((self->state == LAP_NDM) &&
+ !irda_device_is_media_busy(self->irdev))
+ irlap_do_event(self, CONNECT_REQUEST, NULL, NULL);
+ else
+ self->connect_pending = TRUE;
}
/*
- * Function irlap_connect_confirm (void)
+ * Function irlap_connect_confirm (self, skb)
*
- * Connection request is accepted
+ * Connection request has been accepted
*
*/
-void irlap_connect_confirm( struct irlap_cb *self, struct sk_buff *skb)
+void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlmp_link_connect_confirm( self->notify.instance, &self->qos_tx, skb);
+ irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb);
}
/*
- * Function irlap_data_indication (skb)
+ * Function irlap_data_indication (self, skb)
*
* Received data frames from IR-port, so we just pass them up to
* IrLMP for further processing
*
*/
-inline void irlap_data_indication( struct irlap_cb *self, struct sk_buff *skb)
+inline void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
/* Hide LAP header from IrLMP layer */
- skb_pull( skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
+ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER);
#ifdef CONFIG_IRDA_COMPRESSION
- if ( self->qos_tx.compression.value) {
- skb = irlap_decompress_frame( self, skb);
- if ( !skb) {
- DEBUG( 0, __FUNCTION__ "(), Decompress error!\n");
+ if (self->qos_tx.compression.value) {
+ skb = irlap_decompress_frame(self, skb);
+ if (!skb) {
+ DEBUG(1, __FUNCTION__ "(), Decompress error!\n");
return;
}
}
#endif
-
- irlmp_link_data_indication( self->notify.instance, LAP_RELIABLE, skb);
+ irlmp_link_data_indication(self->notify.instance, LAP_RELIABLE, skb);
}
/*
@@ -319,7 +326,7 @@ inline void irlap_data_indication( struct irlap_cb *self, struct sk_buff *skb)
*/
void irlap_unit_data_indication( struct irlap_cb *self, struct sk_buff *skb)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG( 1, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
@@ -333,7 +340,7 @@ void irlap_unit_data_indication( struct irlap_cb *self, struct sk_buff *skb)
skb = irlap_decompress_frame( self, skb);
if ( !skb) {
- DEBUG( 0, __FUNCTION__ "(), Decompress error!\n");
+ DEBUG( 1, __FUNCTION__ "(), Decompress error!\n");
return;
}
}
@@ -356,14 +363,14 @@ inline void irlap_data_request( struct irlap_cb *self, struct sk_buff *skb,
ASSERT( self->magic == LAP_MAGIC, return;);
ASSERT( skb != NULL, return;);
- DEBUG( 4, "irlap_data_request: tx_list=%d\n",
+ DEBUG( 4, __FUNCTION__ "(), tx_list=%d\n",
skb_queue_len( &self->tx_list));
#ifdef CONFIG_IRDA_COMPRESSION
if ( self->qos_tx.compression.value) {
skb = irlap_compress_frame( self, skb);
if ( !skb) {
- DEBUG( 0, __FUNCTION__ "(), Compress error!\n");
+ DEBUG( 1, __FUNCTION__ "(), Compress error!\n");
return;
}
}
@@ -401,8 +408,7 @@ inline void irlap_data_request( struct irlap_cb *self, struct sk_buff *skb,
}
irlap_do_event( self, SEND_I_CMD, skb, NULL);
} else
- skb_queue_tail( &self->tx_list, skb);
-
+ skb_queue_tail( &self->tx_list, skb);
}
/*
@@ -410,14 +416,26 @@ inline void irlap_data_request( struct irlap_cb *self, struct sk_buff *skb,
*
* Request to disconnect connection by service user
*/
-void irlap_disconnect_request( struct irlap_cb *self)
+void irlap_disconnect_request(struct irlap_cb *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(3, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
-
- irlap_do_event( self, DISCONNECT_REQUEST, NULL, NULL);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+
+ switch (self->state) {
+	case LAP_XMIT_P:        /* FALLTHROUGH */
+	case LAP_XMIT_S:        /* FALLTHROUGH */
+	case LAP_CONN:          /* FALLTHROUGH */
+	case LAP_RESET_WAIT:    /* FALLTHROUGH */
+ case LAP_RESET_CHECK:
+ irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL);
+ break;
+ default:
+ DEBUG(0, __FUNCTION__ "(), disconnect pending!\n");
+ self->disconnect_pending = TRUE;
+ break;
+ }
}
/*
@@ -428,7 +446,7 @@ void irlap_disconnect_request( struct irlap_cb *self)
*/
void irlap_disconnect_indication( struct irlap_cb *self, LAP_REASON reason)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG( 1, __FUNCTION__ "(), reason=%s\n", lap_reasons[reason]);
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
@@ -436,24 +454,23 @@ void irlap_disconnect_indication( struct irlap_cb *self, LAP_REASON reason)
#ifdef CONFIG_IRDA_COMPRESSION
irda_free_compression( self);
#endif
-
/* Flush queues */
irlap_flush_all_queues( self);
switch( reason) {
case LAP_RESET_INDICATION:
- DEBUG( 0, "Sending reset request!\n");
+ DEBUG( 1, __FUNCTION__ "(), Sending reset request!\n");
irlap_do_event( self, RESET_REQUEST, NULL, NULL);
break;
- case LAP_NO_RESPONSE:
- case LAP_DISC_INDICATION:
- case LAP_FOUND_NONE:
+	case LAP_NO_RESPONSE:       /* FALLTHROUGH */
+	case LAP_DISC_INDICATION:   /* FALLTHROUGH */
+	case LAP_FOUND_NONE:        /* FALLTHROUGH */
case LAP_MEDIA_BUSY:
irlmp_link_disconnect_indication( self->notify.instance,
self, reason, NULL);
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Reason %d not implemented!\n",
+ DEBUG( 1, __FUNCTION__ "(), Reason %d not implemented!\n",
reason);
}
}
@@ -464,31 +481,55 @@ void irlap_disconnect_indication( struct irlap_cb *self, LAP_REASON reason)
* Start one single discovery operation.
*
*/
-void irlap_discovery_request( struct irlap_cb *self, DISCOVERY *discovery)
+void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
{
struct irlap_info info;
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
ASSERT( discovery != NULL, return;);
+
+ DEBUG( 4, __FUNCTION__ "(), nslots = %d\n", discovery->nslots);
+ ASSERT(( discovery->nslots == 1) || ( discovery->nslots == 6) ||
+ ( discovery->nslots == 8) || ( discovery->nslots == 16),
+ return;);
+
/*
* Discovery is only possible in NDM mode
*/
if ( self->state == LAP_NDM) {
ASSERT( self->discovery_log == NULL, return;);
self->discovery_log= hashbin_new( HB_LOCAL);
-
- info.S = 6; /* Number of slots */
+
+ info.S = discovery->nslots; /* Number of slots */
info.s = 0; /* Current slot */
-
+
self->discovery_cmd = discovery;
info.discovery = discovery;
+ /* Check if the slot timeout is within limits */
+ if (sysctl_slot_timeout < 20) {
+ ERROR(__FUNCTION__
+			      "(), too low value for slot timeout!\n");
+ sysctl_slot_timeout = 20;
+ }
+ /*
+ * Highest value is actually 8, but we allow higher since
+		 * some devices seem to require it.
+ */
+ if (sysctl_slot_timeout > 160) {
+ ERROR(__FUNCTION__
+			      "(), too high value for slot timeout!\n");
+ sysctl_slot_timeout = 160;
+ }
+
+ self->slot_timeout = sysctl_slot_timeout * HZ / 1000;
+
irlap_do_event( self, DISCOVERY_REQUEST, NULL, &info);
} else {
DEBUG( 4, __FUNCTION__
- "(), discovery only possible in NDM mode\n");
+ "(), discovery only possible in NDM mode\n");
irlap_discovery_confirm( self, NULL);
}
}
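
The slot timeout handling above keeps sysctl_slot_timeout in milliseconds,
clamps it to the 20-160 ms window and converts it to jiffies only when a
discovery actually starts. A small sketch of that arithmetic; HZ=100 and
the helper name are assumed purely for the example.

#include <stdio.h>

#define HZ 100  /* assumed tick rate, only for this example */

/* Clamp a slot timeout given in milliseconds to the 20..160 ms window
 * and convert it to jiffies the same way the driver does above.
 */
static int slot_timeout_jiffies(int ms)
{
    if (ms < 20)
        ms = 20;
    if (ms > 160)
        ms = 160;
    return ms * HZ / 1000;
}

int main(void)
{
    printf("90 ms  -> %d jiffies\n", slot_timeout_jiffies(90));   /* 9 */
    printf("500 ms -> %d jiffies\n", slot_timeout_jiffies(500));  /* clamped: 16 */
    return 0;
}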
@@ -506,6 +547,14 @@ void irlap_discovery_confirm( struct irlap_cb *self, hashbin_t *discovery_log)
ASSERT( self->notify.instance != NULL, return;);
+ /*
+ * Check for successful discovery, since we are then allowed to clear
+ * the media busy condition (irlap p.94). This should allow us to make
+ * connection attempts much easier.
+ */
+ if (discovery_log && hashbin_get_size(discovery_log) > 0)
+ irda_device_set_media_busy(self->irdev, FALSE);
+
/* Inform IrLMP */
irlmp_link_discovery_confirm( self->notify.instance, discovery_log);
@@ -521,7 +570,7 @@ void irlap_discovery_confirm( struct irlap_cb *self, hashbin_t *discovery_log)
* Somebody is trying to discover us!
*
*/
-void irlap_discovery_indication( struct irlap_cb *self, DISCOVERY *discovery)
+void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery)
{
DEBUG( 4, __FUNCTION__ "()\n");
@@ -531,7 +580,7 @@ void irlap_discovery_indication( struct irlap_cb *self, DISCOVERY *discovery)
ASSERT( self->notify.instance != NULL, return;);
- irlmp_discovery_indication( self->notify.instance, discovery);
+ irlmp_link_discovery_indication(self->notify.instance, discovery);
}
/*
@@ -540,7 +589,7 @@ void irlap_discovery_indication( struct irlap_cb *self, DISCOVERY *discovery)
*
*
*/
-void irlap_status_indication( int quality_of_link)
+void irlap_status_indication(int quality_of_link)
{
switch( quality_of_link) {
case STATUS_NO_ACTIVITY:
@@ -552,8 +601,7 @@ void irlap_status_indication( int quality_of_link)
default:
break;
}
- /* TODO: layering violation! */
- irlmp_status_indication( quality_of_link, NO_CHANGE);
+ irlmp_status_indication(quality_of_link, LOCK_NO_CHANGE);
}
/*
@@ -564,7 +612,7 @@ void irlap_status_indication( int quality_of_link)
*/
void irlap_reset_indication( struct irlap_cb *self)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG( 1, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
@@ -583,7 +631,7 @@ void irlap_reset_indication( struct irlap_cb *self)
*/
void irlap_reset_confirm(void)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG( 1, __FUNCTION__ "()\n");
}
/*
@@ -672,7 +720,7 @@ int irlap_validate_ns_received( struct irlap_cb *self, int ns)
/* ns as expected? */
if ( ns == self->vr) {
- DEBUG( 4, "*** irlap_validate_ns_received: expected!\n");
+ DEBUG( 4, __FUNCTION__ "(), expected!\n");
return NS_EXPECTED;
}
/*
@@ -696,7 +744,7 @@ int irlap_validate_nr_received( struct irlap_cb *self, int nr)
/* nr as expected? */
if ( nr == self->vs) {
- DEBUG( 4, "*** irlap_validate_nr_received: expected!\n");
+ DEBUG( 4, __FUNCTION__ "(), expected!\n");
return NR_EXPECTED;
}
@@ -724,7 +772,7 @@ int irlap_validate_nr_received( struct irlap_cb *self, int nr)
*/
void irlap_initiate_connection_state( struct irlap_cb *self)
{
- DEBUG( 4, "irlap_initiate_connection_state()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
@@ -749,28 +797,22 @@ void irlap_initiate_connection_state( struct irlap_cb *self)
* frame in order to delay for the specified amount of time. This is
* done to avoid using timers, and the forbidden udelay!
*/
-void irlap_wait_min_turn_around( struct irlap_cb *self, struct qos_info *qos)
+void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos)
{
int usecs;
int speed;
- int bytes = 0;
+	int bytes;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( qos != NULL, return;);
-
/* Get QoS values. */
speed = qos->baud_rate.value;
usecs = qos->min_turn_time.value;
/* No need to calculate XBOFs for speeds over 115200 bps */
- if ( speed > 115200) {
+ if (speed > 115200) {
self->mtt_required = usecs;
return;
}
- DEBUG( 4, __FUNCTION__ "(), delay=%d usecs\n", usecs);
-
/*
* Send additional BOF's for the next frame for the requested
* min turn time, so now we must calculate how many chars (XBOF's) we
@@ -778,8 +820,6 @@ void irlap_wait_min_turn_around( struct irlap_cb *self, struct qos_info *qos)
*/
bytes = speed * usecs / 10000000;
- DEBUG( 4, __FUNCTION__ "(), xbofs delay = %d\n", bytes);
-
self->xbofs_delay = bytes;
}
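
The XBOF count used above follows from the async framing: each byte on the
wire takes 10 bit times, so at `speed` bits per second one byte lasts
10^7/speed microseconds, and covering a turnaround of `usecs` microseconds
takes speed*usecs/10^7 extra BOF characters. A worked sketch in plain C,
not kernel code; the numbers are examples only.

#include <stdio.h>

/* Number of extra BOF characters needed to cover a min-turn-around time
 * of `usecs` microseconds at `speed` bits per second (10 bits per byte
 * on the async wire), matching the formula above.
 */
static int xbofs_for_mtt(int speed, int usecs)
{
    return speed * usecs / 10000000;
}

int main(void)
{
    printf("9600 bps,   5000 us -> %d XBOFs\n", xbofs_for_mtt(9600, 5000));    /* 4 */
    printf("115200 bps, 1000 us -> %d XBOFs\n", xbofs_for_mtt(115200, 1000));  /* 11 */
    return 0;
}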
@@ -818,26 +858,26 @@ void irlap_flush_all_queues( struct irlap_cb *self)
* Change the speed of the IrDA port
*
*/
-void irlap_change_speed( struct irlap_cb *self, int speed)
+void irlap_change_speed(struct irlap_cb *self, int speed)
{
- DEBUG( 4, __FUNCTION__ "(), setting speed to %d\n", speed);
+ DEBUG(4, __FUNCTION__ "(), setting speed to %d\n", speed);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- if ( !self->irdev) {
- DEBUG( 0, __FUNCTION__ "(), driver missing!\n");
+ if (!self->irdev) {
+ DEBUG( 1, __FUNCTION__ "(), driver missing!\n");
return;
}
- irda_device_change_speed( self->irdev, speed);
+ irda_device_change_speed(self->irdev, speed);
self->qos_rx.baud_rate.value = speed;
self->qos_tx.baud_rate.value = speed;
}
#ifdef CONFIG_IRDA_COMPRESSION
-void irlap_init_comp_qos_capabilities( struct irlap_cb *self)
+void irlap_init_comp_qos_capabilities(struct irlap_cb *self)
{
struct irda_compressor *comp;
__u8 mask; /* Current bit tested */
@@ -888,8 +928,8 @@ void irlap_init_comp_qos_capabilities( struct irlap_cb *self)
* IrLAP itself. Normally, IrLAP will not specify any values, but it can
* be used to restrict certain values.
*/
-void irlap_init_qos_capabilities( struct irlap_cb *self,
- struct qos_info *qos_user)
+void irlap_init_qos_capabilities(struct irlap_cb *self,
+ struct qos_info *qos_user)
{
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
@@ -911,8 +951,8 @@ void irlap_init_qos_capabilities( struct irlap_cb *self,
* allowed to supply these values. We check each parameter since the
* user may not have set all of them.
*/
- if ( qos_user != NULL) {
- DEBUG( 0, __FUNCTION__ "(), Found user specified QoS!\n");
+ if (qos_user) {
+ DEBUG( 1, __FUNCTION__ "(), Found user specified QoS!\n");
if ( qos_user->baud_rate.bits)
self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits;
@@ -966,6 +1006,7 @@ void irlap_apply_default_connection_parameters( struct irlap_cb *self)
self->bofs_count = 11;
/* Use these until connection has been made */
+ self->slot_timeout = sysctl_slot_timeout;
self->final_timeout = FINAL_TIMEOUT;
self->poll_timeout = POLL_TIMEOUT;
self->wd_timeout = WD_TIMEOUT;
@@ -973,7 +1014,9 @@ void irlap_apply_default_connection_parameters( struct irlap_cb *self)
self->qos_tx.data_size.value = 64;
self->qos_tx.additional_bofs.value = 11;
- irlap_flush_all_queues( self);
+ irlap_flush_all_queues(self);
+
+ self->disconnect_pending = FALSE;
}
/*
@@ -982,8 +1025,8 @@ void irlap_apply_default_connection_parameters( struct irlap_cb *self)
* Initialize IrLAP with the negotiated QoS values
*
*/
-void irlap_apply_connection_parameters( struct irlap_cb *self,
- struct qos_info *qos)
+void irlap_apply_connection_parameters(struct irlap_cb *self,
+ struct qos_info *qos)
{
DEBUG( 4, __FUNCTION__ "()\n");
@@ -1010,30 +1053,27 @@ void irlap_apply_connection_parameters( struct irlap_cb *self,
* TODO: these values should be calculated from the final timer
* as well
*/
- if ( qos->link_disc_time.value == 3)
+ if (qos->link_disc_time.value == 3)
self->N1 = 0;
else
- /* self->N1 = 6; */
self->N1 = 3000 / qos->max_turn_time.value;
DEBUG( 4, "Setting N1 = %d\n", self->N1);
- /* self->N2 = qos->link_disc_time.value * 2; */
self->N2 = qos->link_disc_time.value * 1000 / qos->max_turn_time.value;
DEBUG( 4, "Setting N2 = %d\n", self->N2);
/*
* Initialize timeout values, some of the rules are listed on
- * page 92 in IrLAP. Divide by 10 since the kernel timers has a
- * resolution of 10 ms.
+ * page 92 in IrLAP.
*/
- self->poll_timeout = qos->max_turn_time.value / 10;
- self->final_timeout = qos->max_turn_time.value / 10;
+ self->poll_timeout = qos->max_turn_time.value * HZ / 1000;
+ self->final_timeout = qos->max_turn_time.value * HZ / 1000;
self->wd_timeout = self->poll_timeout * 2;
#ifdef CONFIG_IRDA_COMPRESSION
if ( qos->compression.value) {
- DEBUG( 0, __FUNCTION__ "(), Initializing compression\n");
+ DEBUG( 1, __FUNCTION__ "(), Initializing compression\n");
irda_set_compression( self, qos->compression.value);
irlap_compressor_init( self, 0);
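
As a worked example of the retry-count and timeout arithmetic a few lines
above: assuming a negotiated max turn time of 500 ms, a link disconnect
time of 12 s and HZ=100 (all three picked only for illustration), the
driver would end up with N1 = 6, N2 = 24 and poll/final timeouts of 50
jiffies, with the WD timeout at twice that.

#include <stdio.h>

#define HZ 100  /* assumed tick rate, for illustration only */

int main(void)
{
    int max_turn_time  = 500;  /* ms, example negotiated value */
    int link_disc_time = 12;   /* s,  example negotiated value */

    /* Retry counters and timeouts derived as in the code above */
    int N1            = 3000 / max_turn_time;                  /* 6   */
    int N2            = link_disc_time * 1000 / max_turn_time; /* 24  */
    int poll_timeout  = max_turn_time * HZ / 1000;             /* 50  */
    int final_timeout = max_turn_time * HZ / 1000;             /* 50  */
    int wd_timeout    = poll_timeout * 2;                      /* 100 */

    printf("N1=%d N2=%d poll=%d final=%d wd=%d (jiffies)\n",
           N1, N2, poll_timeout, final_timeout, wd_timeout);
    return 0;
}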
@@ -1065,7 +1105,7 @@ int irlap_proc_read( char *buf, char **start, off_t offset, int len,
ASSERT( self != NULL, return -ENODEV;);
ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
- len += sprintf( buf+len, "IrLAP[%d] <-> %s ",
+ len += sprintf( buf+len, "irlap%d <-> %s ",
i++, self->irdev->name);
len += sprintf( buf+len, "state: %s\n",
irlap_state[ self->state]);
diff --git a/net/irda/irlap_comp.c b/net/irda/irlap_comp.c
index d2e876be9..9959b64bc 100644
--- a/net/irda/irlap_comp.c
+++ b/net/irda/irlap_comp.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Fri Oct 9 09:18:07 1998
- * Modified at: Mon Dec 14 11:55:27 1998
+ * Modified at: Mon Feb 8 01:23:52 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: ppp.c, isdn_ppp.c
*
@@ -81,8 +81,7 @@ void irda_unregister_compressor ( struct compressor *cp)
if ( !node) {
DEBUG( 0, __FUNCTION__ "(), compressor not found!\n");
return;
- }
-
+ }
kfree( node);
}
@@ -99,7 +98,7 @@ int irda_set_compression( struct irlap_cb *self, int proto)
__u8 options[CILEN_DEFLATE];
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return -ENODEV;);
ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
@@ -180,7 +179,7 @@ void irlap_compressor_init( struct irlap_cb *self, int compress)
int debug = TRUE;
__u8 options[CILEN_DEFLATE];
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
@@ -240,8 +239,8 @@ struct sk_buff *irlap_compress_frame( struct irlap_cb *self,
ASSERT( skb != NULL, return NULL;);
- DEBUG( 0, __FUNCTION__ "() skb->len=%d, jiffies=%ld\n", (int) skb->len,
- jiffies);
+ DEBUG(4, __FUNCTION__ "() skb->len=%d, jiffies=%ld\n", (int) skb->len,
+ jiffies);
ASSERT( self != NULL, return NULL;);
ASSERT( self->magic == LAP_MAGIC, return NULL;);
@@ -267,7 +266,7 @@ struct sk_buff *irlap_compress_frame( struct irlap_cb *self,
skb->data, new_skb->data,
skb->len, new_skb->len);
if( count <= 0) {
- DEBUG( 0, __FUNCTION__ "(), Unable to compress frame!\n");
+ DEBUG(4, __FUNCTION__ "(), Unable to compress frame!\n");
dev_kfree_skb( new_skb);
/* Tell peer that this frame is not compressed */
@@ -284,8 +283,8 @@ struct sk_buff *irlap_compress_frame( struct irlap_cb *self,
dev_kfree_skb( skb);
- DEBUG( 0, __FUNCTION__ "() new_skb->len=%d\n, jiffies=%ld",
- (int) new_skb->len, jiffies);
+ DEBUG(4, __FUNCTION__ "() new_skb->len=%d\n, jiffies=%ld",
+ (int) new_skb->len, jiffies);
return new_skb;
}
@@ -336,7 +335,7 @@ struct sk_buff *irlap_decompress_frame( struct irlap_cb *self,
count = irda_decompress( self->decompressor.state, skb->data,
skb->len, new_skb->data, new_skb->len);
if ( count <= 0) {
- DEBUG( 0, __FUNCTION__ "(), Unable to decompress frame!\n");
+ DEBUG( 4, __FUNCTION__ "(), Unable to decompress frame!\n");
dev_kfree_skb( new_skb);
return skb;
@@ -344,7 +343,7 @@ struct sk_buff *irlap_decompress_frame( struct irlap_cb *self,
skb_trim( new_skb, count);
- DEBUG( 0, __FUNCTION__ "() new_skb->len=%d\n", (int) new_skb->len);
+ DEBUG( 4, __FUNCTION__ "() new_skb->len=%d\n", (int) new_skb->len);
return new_skb;
}
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index d2afcbd29..a2fbadf65 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irlap_event.c
- * Version: 0.1
+ * Version: 0.8
* Description: IrLAP state machine implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Tue Jan 19 22:58:45 1999
+ * Modified at: Fri Apr 23 11:55:12 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
@@ -40,38 +40,42 @@
#include <net/irda/irda_device.h>
-static int irlap_state_ndm ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_query ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_reply ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_conn ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_setup ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_offline( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_xmit_p ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_pclose ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_nrm_p ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
+#if CONFIG_IRDA_FAST_RR
+int sysctl_fast_poll_increase = 50;
+#endif
+
+static int irlap_state_ndm (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_query (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_reply (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_conn (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_setup (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_xmit_p (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_pclose (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_nrm_p (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_reset ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_nrm_s ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_xmit_s ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_sclose ( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info);
-static int irlap_state_reset_check( struct irlap_cb *, IRLAP_EVENT event,
- struct sk_buff *, struct irlap_info *);
-
-static char *irlap_event[] = {
+static int irlap_state_reset (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_nrm_s (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_xmit_s (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info);
+static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
+ struct sk_buff *, struct irlap_info *);
+
+static const char *irlap_event[] = {
"DISCOVERY_REQUEST",
"CONNECT_REQUEST",
"CONNECT_RESPONSE",
@@ -84,6 +88,7 @@ static char *irlap_event[] = {
"RECV_DISCOVERY_XID_RSP",
"RECV_SNRM_CMD",
"RECV_TEST_CMD",
+ "RECV_TEST_RSP",
"RECV_UA_RSP",
"RECV_DM_RSP",
"RECV_I_CMD",
@@ -103,7 +108,7 @@ static char *irlap_event[] = {
"BACKOFF_TIMER_EXPIRED",
};
-char *irlap_state[] = {
+const char *irlap_state[] = {
"LAP_NDM",
"LAP_QUERY",
"LAP_REPLY",
@@ -144,37 +149,37 @@ static int (*state[])( struct irlap_cb *self, IRLAP_EVENT event,
/*
* Function irda_poll_timer_expired (data)
*
- *
- *
+ *    Poll timer has expired. Normally we must now send an RR frame to the
+ *    remote device.
*/
-static void irlap_poll_timer_expired( unsigned long data)
+static void irlap_poll_timer_expired(unsigned long data)
{
struct irlap_cb *self = (struct irlap_cb *) data;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- irlap_do_event( self, POLL_TIMER_EXPIRED, NULL, NULL);
+ irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
}
-void irlap_start_poll_timer( struct irlap_cb *self, int timeout)
+void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
#ifdef CONFIG_IRDA_FAST_RR
- if ( skb_queue_len( &self->tx_list) == 0) {
- if ( self->fast_RR == TRUE) {
+ if (skb_queue_len(&self->tx_list) == 0) {
+ if (self->fast_RR == TRUE) {
/*
* Assert that the fast poll timer has not reached the
* normal poll timer yet
*/
- if ( self->fast_RR_timeout < timeout) {
+ if (self->fast_RR_timeout < timeout) {
/*
* FIXME: this should be a more configurable
* function
*/
- self->fast_RR_timeout += 15;
+ self->fast_RR_timeout += (sysctl_fast_poll_increase * HZ/1000);
/* Use this fast(er) timeout instead */
timeout = self->fast_RR_timeout;
@@ -182,69 +187,84 @@ void irlap_start_poll_timer( struct irlap_cb *self, int timeout)
} else {
self->fast_RR = TRUE;
- /* Start with just 1 ms */
- self->fast_RR_timeout = 1;
- timeout = 1;
+ /* Start with just 0 ms */
+ self->fast_RR_timeout = 0;
+ timeout = 0;
}
} else
self->fast_RR = FALSE;
- DEBUG( 4, __FUNCTION__ "(), Timeout=%d\n", timeout);
+ DEBUG(4, __FUNCTION__ "(), Timeout=%d\n", timeout);
#endif
- irda_start_timer( &self->poll_timer, timeout,
- (unsigned long) self, irlap_poll_timer_expired);
+ if (timeout == 0)
+ irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
+ else
+ irda_start_timer(&self->poll_timer, timeout,
+ (unsigned long) self, irlap_poll_timer_expired);
}
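
With CONFIG_IRDA_FAST_RR the function above polls immediately when the
transmit queue is empty and then backs off by sysctl_fast_poll_increase
milliseconds per expiry until the normal poll timeout is reached. A sketch
of that ramp with assumed example values (HZ=100, a 500 ms normal poll
period); it only mimics the timeout progression, not the timer itself.

#include <stdio.h>

#define HZ 100                              /* assumed tick rate */
static int sysctl_fast_poll_increase = 50;  /* ms, default from the code above */

int main(void)
{
    int poll_timeout    = 50;  /* jiffies; ~500 ms normal poll period (example) */
    int fast_RR_timeout = 0;   /* start by polling immediately, as above */
    int step = sysctl_fast_poll_increase * HZ / 1000;

    /* Show how the fast poll interval ramps up towards the normal one */
    while (fast_RR_timeout < poll_timeout) {
        printf("fast RR timeout = %d jiffies\n", fast_RR_timeout);
        fast_RR_timeout += step;
    }
    printf("back to normal poll timeout = %d jiffies\n", poll_timeout);
    return 0;
}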
/*
* Function irlap_do_event (event, skb, info)
*
- * Rushes through the state machine without any delay. If state = XMIT
+ * Rushes through the state machine without any delay. If state == XMIT
* then send queued data frames.
*/
void irlap_do_event( struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info)
{
int ret;
- int iter = 0;
- if ( !self || self->magic != LAP_MAGIC) {
- DEBUG( 0, "irlap_do_event: bad pointer *self\n");
+ if (!self || self->magic != LAP_MAGIC)
return;
- }
-
- DEBUG( 4, "irlap_do_event: event = %s, state = %s\n",
- irlap_event[ event], irlap_state[ self->state]);
-
- /*
- * Do event, this implementation does not deal with pending events.
- * This is because I don't see the need for this. DB
- */
- ret = (*state[ self->state]) ( self, event, skb, info);
+
+ DEBUG(4, __FUNCTION__ "(), event = %s, state = %s\n",
+ irlap_event[ event], irlap_state[ self->state]);
+
+ ret = (*state[ self->state]) (self, event, skb, info);
/*
- * Check if we have switched to XMIT state? If so, send queued data
- * frames if any, if -1 is returned it means that we are not allowed
- * to send any more frames.
+	 * Check if there are any pending events that need to be executed
*/
- while (( self->state == LAP_XMIT_P) || ( self->state == LAP_XMIT_S)) {
- if ( skb_queue_len( &self->tx_list) > 0) {
+ switch (self->state) {
+ case LAP_XMIT_P: /* FALLTHROUGH */
+ case LAP_XMIT_S:
+ /*
+ * Check if there are any queued data frames, and do not
+ * try to disconnect link if we send any data frames, since
+		 * try to disconnect the link if we send any data frames, since
+		 * that will change the state away from XMIT
+ if (skb_queue_len(&self->tx_list)) {
+ /* Try to send away all queued data frames */
+ while ((skb = skb_dequeue(&self->tx_list)) != NULL) {
+ ret = (*state[ self->state])(self, SEND_I_CMD,
+ skb, NULL);
+ if ( ret == -EPROTO)
+ break; /* Try again later! */
+ }
+ } else if (self->disconnect_pending) {
+ DEBUG(0, __FUNCTION__ "(), disconnecting!\n");
+ self->disconnect_pending = FALSE;
- struct sk_buff *skb = skb_dequeue( &self->tx_list);
- ASSERT( skb != NULL, return;);
+ ret = (*state[self->state])(self, DISCONNECT_REQUEST,
+ NULL, NULL);
+ }
+ break;
+ case LAP_NDM:
+ /* Check if we should try to connect */
+ if ((self->connect_pending) &&
+ !irda_device_is_media_busy(self->irdev))
+ {
+ self->connect_pending = FALSE;
- DEBUG( 4, "** Sending queued data frames\n");
- ret = (*state[ self->state])( self, SEND_I_CMD, skb,
- NULL);
- if ( ret == -EPROTO)
- return; /* Try again later! */
- } else
- return;
-
- /* Just in case :-) */
- if (iter++ > 100) {
- DEBUG( 0, __FUNCTION__ "(), *** breaking!! ***\n");
- return;
+ ret = (*state[self->state])(self, CONNECT_REQUEST,
+ NULL, NULL);
}
+ break;
+/* case LAP_CONN: */
+/* case LAP_RESET_WAIT: */
+/* case LAP_RESET_CHECK: */
+ default:
+ break;
}
}
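
The post-processing above is a deferred-action pattern: connect and
disconnect requests that arrive while the state machine cannot handle them
only set connect_pending/disconnect_pending, and the flags are replayed
the next time irlap_do_event() leaves the machine in a state where the
request is legal. A stripped-down sketch of that idea, with hypothetical
names; it is not the kernel code and omits the real state transitions.

#include <stdbool.h>
#include <stdio.h>

enum lap_state { LAP_NDM, LAP_XMIT };

struct lap {
    enum lap_state state;
    bool connect_pending;
    bool disconnect_pending;
};

/* Run one event, then act on any request that had to be deferred */
static void do_event(struct lap *self, const char *event)
{
    printf("event %s in state %d\n", event, self->state);

    switch (self->state) {
    case LAP_XMIT:
        if (self->disconnect_pending) {
            self->disconnect_pending = false;
            printf("  running deferred disconnect\n");
        }
        break;
    case LAP_NDM:
        if (self->connect_pending) {
            self->connect_pending = false;
            printf("  running deferred connect\n");
        }
        break;
    }
}

int main(void)
{
    struct lap lap = { LAP_NDM, true, false };

    do_event(&lap, "MEDIA_BUSY_CLEARED");  /* deferred connect fires here */
    return 0;
}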
@@ -256,10 +276,8 @@ void irlap_do_event( struct irlap_cb *self, IRLAP_EVENT event,
*/
void irlap_next_state( struct irlap_cb *self, IRLAP_STATE state)
{
- if ( !self || self->magic != LAP_MAGIC) {
- DEBUG( 4, "irlap_next_state: I have lost myself!\n");
+ if ( !self || self->magic != LAP_MAGIC)
return;
- }
DEBUG( 4, "next LAP state = %s\n", irlap_state[ state]);
@@ -284,7 +302,7 @@ void irlap_next_state( struct irlap_cb *self, IRLAP_STATE state)
static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info)
{
- DISCOVERY *discovery_rsp;
+ discovery_t *discovery_rsp;
int ret = 0;
DEBUG( 4, __FUNCTION__ "()\n");
@@ -296,7 +314,7 @@ static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
case CONNECT_REQUEST:
ASSERT( self->irdev != NULL, return -1;);
- if ( irda_device_is_media_busy( self->irdev)) {
+ if (irda_device_is_media_busy(self->irdev)) {
DEBUG( 0, __FUNCTION__
"(), CONNECT_REQUEST: media busy!\n");
@@ -308,7 +326,7 @@ static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
irlap_send_snrm_frame( self, &self->qos_rx);
/* Start Final-bit timer */
- irlap_start_final_timer( self, self->final_timeout);
+ irlap_start_final_timer(self, self->final_timeout);
self->retry_count = 0;
irlap_next_state( self, LAP_SETUP);
@@ -327,26 +345,26 @@ static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
case DISCOVERY_REQUEST:
ASSERT( info != NULL, return -1;);
- if ( irda_device_is_media_busy( self->irdev)) {
- DEBUG(0, "irlap_discovery_request: media busy!\n");
+ if (irda_device_is_media_busy(self->irdev)) {
+ DEBUG(0, __FUNCTION__ "(), media busy!\n");
/* irlap->log.condition = MEDIA_BUSY; */
/* Always switch state before calling upper layers */
- irlap_next_state( self, LAP_NDM);
+ irlap_next_state(self, LAP_NDM);
/* This will make IrLMP try again */
- irlap_discovery_confirm( self, NULL);
+ irlap_discovery_confirm(self, NULL);
return 0;
}
self->S = info->S;
self->s = info->s;
- irlap_send_discovery_xid_frame( self, info->S, info->s, TRUE,
- info->discovery);
+ irlap_send_discovery_xid_frame(self, info->S, info->s, TRUE,
+ info->discovery);
self->s++;
- irlap_start_slot_timer( self, SLOT_TIMEOUT);
- irlap_next_state( self, LAP_QUERY);
+ irlap_start_slot_timer(self, self->slot_timeout);
+ irlap_next_state(self, LAP_QUERY);
break;
case RECV_DISCOVERY_XID_CMD:
@@ -354,20 +372,20 @@ static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
/* Assert that this is not the final slot */
if ( info->s <= info->S) {
- self->daddr = info->daddr;
- self->slot = irlap_generate_rand_time_slot( info->S,
- info->s);
+ /* self->daddr = info->daddr; */
+ self->slot = irlap_generate_rand_time_slot(info->S,
+ info->s);
DEBUG( 4, "XID_CMD: S=%d, s=%d, slot %d\n", info->S,
info->s, self->slot);
if ( self->slot == info->s) {
discovery_rsp = irlmp_get_discovery_response();
-
- DEBUG( 4, "Sending XID rsp 1\n");
- irlap_send_discovery_xid_frame( self, info->S,
- self->slot,
- FALSE,
- discovery_rsp);
+ discovery_rsp->daddr = info->daddr;
+
+ irlap_send_discovery_xid_frame(self, info->S,
+ self->slot,
+ FALSE,
+ discovery_rsp);
self->frame_sent = TRUE;
} else
self->frame_sent = FALSE;
@@ -376,11 +394,21 @@ static int irlap_state_ndm( struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state( self, LAP_REPLY);
}
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
+ break;
+
+ case RECV_TEST_CMD:
+ skb_pull(skb, sizeof(struct test_frame));
+ irlap_send_test_frame(self, info->daddr, skb);
+ dev_kfree_skb(skb);
+ break;
+ case RECV_TEST_RSP:
+ DEBUG(0, __FUNCTION__ "() not implemented!\n");
+ dev_kfree_skb(skb);
break;
-
default:
- /* DEBUG( 0, "irlap_state_ndm: Unknown event"); */
+ DEBUG(2, __FUNCTION__ "(), Unknown event %s",
+ irlap_event[event]);
ret = -1;
break;
}
@@ -406,51 +434,51 @@ static int irlap_state_query( struct irlap_cb *self, IRLAP_EVENT event,
ASSERT( info != NULL, return -1;);
ASSERT( info->discovery != NULL, return -1;);
- DEBUG( 4, __FUNCTION__ "(), daddr=%08x\n",
- info->discovery->daddr);
+ DEBUG(4, __FUNCTION__ "(), daddr=%08x\n",
+ info->discovery->daddr);
hashbin_insert( self->discovery_log,
(QUEUE *) info->discovery,
info->discovery->daddr, NULL);
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
/* Keep state */
irlap_next_state( self, LAP_QUERY);
break;
case SLOT_TIMER_EXPIRED:
- if ( self->s < self->S) {
- irlap_send_discovery_xid_frame( self, self->S,
- self->s, TRUE,
- self->discovery_cmd);
+ if (self->s < self->S) {
+ irlap_send_discovery_xid_frame(self, self->S,
+ self->s, TRUE,
+ self->discovery_cmd);
self->s++;
- irlap_start_slot_timer( self, SLOT_TIMEOUT);
+ irlap_start_slot_timer(self, self->slot_timeout);
/* Keep state */
- irlap_next_state( self, LAP_QUERY);
+ irlap_next_state(self, LAP_QUERY);
} else {
/* This is the final slot! */
- irlap_send_discovery_xid_frame( self, self->S, 0xff,
- TRUE,
- self->discovery_cmd);
+ irlap_send_discovery_xid_frame(self, self->S, 0xff,
+ TRUE,
+ self->discovery_cmd);
/* Always switch state before calling upper layers */
- irlap_next_state( self, LAP_NDM);
+ irlap_next_state(self, LAP_NDM);
/*
* We are now finished with the discovery procedure,
* so now we must return the results
*/
- irlap_discovery_confirm( self, self->discovery_log);
+ irlap_discovery_confirm(self, self->discovery_log);
}
break;
default:
- DEBUG( 4, __FUNCTION__ "(), Unknown event %d, %s\n", event,
- irlap_event[event]);
+ DEBUG(2, __FUNCTION__ "(), Unknown event %d, %s\n", event,
+ irlap_event[event]);
- if ( skb != NULL) {
+ if (skb)
dev_kfree_skb( skb);
- }
+
ret = -1;
break;
}
@@ -464,58 +492,57 @@ static int irlap_state_query( struct irlap_cb *self, IRLAP_EVENT event,
* are waiting for the right time slot to send a response XID frame
*
*/
-static int irlap_state_reply( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
- DISCOVERY *discovery_rsp;
+ discovery_t *discovery_rsp;
int ret=0;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == LAP_MAGIC, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == LAP_MAGIC, return -1;);
- switch( event) {
+ switch(event) {
case QUERY_TIMER_EXPIRED:
- DEBUG( 0, __FUNCTION__ "(), QUERY_TIMER_EXPIRED <%ld>\n",
- jiffies);
- irlap_next_state( self, LAP_NDM);
+ DEBUG(2, __FUNCTION__ "(), QUERY_TIMER_EXPIRED <%ld>\n",
+ jiffies);
+ irlap_next_state(self, LAP_NDM);
break;
case RECV_DISCOVERY_XID_CMD:
- ASSERT( info != NULL, return -1;);
+ ASSERT(info != NULL, return -1;);
/*
* Last frame?
*/
- if ( info->s == 0xff) {
- del_timer( &self->query_timer);
+ if (info->s == 0xff) {
+ del_timer(&self->query_timer);
/* info->log.condition = REMOTE; */
/* Always switch state before calling upper layers */
- irlap_next_state( self, LAP_NDM);
+ irlap_next_state(self, LAP_NDM);
- irlap_discovery_indication( self, info->discovery);
- } else if (( info->s >= self->slot) &&
- ( !self->frame_sent)) {
- DEBUG( 4, "Sending XID rsp 2, s=%d\n", info->s);
+ irlap_discovery_indication(self, info->discovery);
+ } else if ((info->s >= self->slot) && (!self->frame_sent)) {
discovery_rsp = irlmp_get_discovery_response();
+ discovery_rsp->daddr = info->daddr;
- irlap_send_discovery_xid_frame( self, info->S,
- self->slot, FALSE,
- discovery_rsp);
+ irlap_send_discovery_xid_frame(self, info->S,
+ self->slot, FALSE,
+ discovery_rsp);
self->frame_sent = TRUE;
- irlap_next_state( self, LAP_REPLY);
+ irlap_next_state(self, LAP_REPLY);
}
- dev_kfree_skb( skb);
+ dev_kfree_skb(skb);
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d, %s\n", event,
- irlap_event[event]);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event,
+ irlap_event[event]);
- if ( skb != NULL)
+ if ( skb)
dev_kfree_skb( skb);
-
+
ret = -1;
break;
}
@@ -529,17 +556,17 @@ static int irlap_state_reply( struct irlap_cb *self, IRLAP_EVENT event,
* layer to accept or refuse connection
*
*/
-static int irlap_state_conn( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret = 0;
- DEBUG( 4, __FUNCTION__ "(), event=%s\n", irlap_event[ event]);
+ DEBUG(4, __FUNCTION__ "(), event=%s\n", irlap_event[ event]);
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == LAP_MAGIC, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == LAP_MAGIC, return -1;);
- switch( event) {
+ switch (event) {
case CONNECT_RESPONSE:
skb_pull( skb, 11);
@@ -571,20 +598,20 @@ static int irlap_state_conn( struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state( self, LAP_NDM);
#endif
break;
-
+
case RECV_DISCOVERY_XID_CMD:
DEBUG( 3, __FUNCTION__ "(), event RECV_DISCOVER_XID_CMD!\n");
irlap_next_state( self, LAP_NDM);
break;
-
+
case DISCONNECT_REQUEST:
irlap_send_dm_frame( self);
irlap_next_state( self, LAP_CONN);
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d, %s\n", event,
- irlap_event[event]);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %d, %s\n", event,
+ irlap_event[event]);
ret = -1;
break;
}
@@ -682,7 +709,9 @@ static int irlap_state_setup( struct irlap_cb *self, IRLAP_EVENT event,
irlap_apply_connection_parameters( self, &self->qos_tx);
self->retry_count = 0;
- irlap_send_rr_frame( self, CMD_FRAME);
+
+ /* This frame will just be sent at the old speed */
+ /* irlap_send_rr_frame( self, CMD_FRAME); */
irlap_start_final_timer( self, self->final_timeout/2);
irlap_next_state( self, LAP_NRM_P);
@@ -724,9 +753,10 @@ static int irlap_state_offline( struct irlap_cb *self, IRLAP_EVENT event,
/*
* Function irlap_state_xmit_p (self, event, skb, info)
*
- * XMIT, Only the primary station has right to transmit, and we therefor
- * do not expect to receive any transmissions from other stations.
- *
+ *    XMIT, Only the primary station has the right to transmit, and we
+ * therefore do not expect to receive any transmissions from other
+ * stations.
+ *
*/
static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
struct sk_buff *skb, struct irlap_info *info)
@@ -739,7 +769,7 @@ static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
DEBUG( 4, __FUNCTION__ "(), event=%s, vs=%d, vr=%d",
irlap_event[ event], self->vs, self->vr);
- switch( event) {
+ switch (event) {
case SEND_I_CMD:
ASSERT( skb != NULL, return -1;);
DEBUG( 4, __FUNCTION__ "(), Window=%d\n", self->window);
@@ -785,6 +815,12 @@ static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
DEBUG( 4, __FUNCTION__ "(), window <= 1\n");
irlap_send_data_primary_poll( self, skb);
irlap_next_state( self, LAP_NRM_P);
+
+ /*
+ * Make sure state machine does not try to send
+ * any more frames
+ */
+ ret = -EPROTO;
}
#ifdef CONFIG_IRDA_FAST_RR
/* Peer may want to reply immediately */
@@ -793,7 +829,7 @@ static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
} else {
DEBUG( 4, __FUNCTION__
"(), Unable to send! remote busy?\n");
- skb_queue_head( &self->tx_list, skb);
+ skb_queue_head(&self->tx_list, skb);
/*
* The next ret is important, because it tells
@@ -802,22 +838,23 @@ static int irlap_state_xmit_p( struct irlap_cb *self, IRLAP_EVENT event,
ret = -EPROTO;
}
break;
+ case POLL_TIMER_EXPIRED:
+ irlap_send_rr_frame(self, CMD_FRAME);
+ irlap_start_final_timer(self, self->final_timeout);
+ irlap_next_state(self, LAP_NRM_P);
+ break;
case DISCONNECT_REQUEST:
- del_timer( &self->poll_timer);
- irlap_wait_min_turn_around( self, &self->qos_tx);
- irlap_send_disc_frame( self);
- irlap_flush_all_queues( self);
- irlap_start_final_timer( self, self->final_timeout);
+ del_timer(&self->poll_timer);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_send_disc_frame(self);
+ irlap_flush_all_queues(self);
+ irlap_start_final_timer(self, self->final_timeout);
self->retry_count = 0;
- irlap_next_state( self, LAP_PCLOSE);
- break;
- case POLL_TIMER_EXPIRED:
- irlap_send_rr_frame( self, CMD_FRAME);
- irlap_start_final_timer( self, self->final_timeout);
- irlap_next_state( self, LAP_NRM_P);
+ irlap_next_state(self, LAP_PCLOSE);
break;
default:
- /* DEBUG( 0, "irlap_state_xmit: Unknown event"); */
+ DEBUG(0, __FUNCTION__ "(), Unknown event %s\n",
+ irlap_event[event]);
ret = -EINVAL;
break;
}
@@ -834,7 +871,7 @@ static int irlap_state_pclose( struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == LAP_MAGIC, return -1;);
@@ -870,7 +907,7 @@ static int irlap_state_pclose( struct irlap_cb *self, IRLAP_EVENT event,
}
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d\n", event);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %d\n", event);
ret = -1;
break;
}
@@ -886,117 +923,18 @@ static int irlap_state_pclose( struct irlap_cb *self, IRLAP_EVENT event,
* transmit any frames and is expecting to receive frames only from the
 * secondary to which transmission permission has been given.
*/
-static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret = 0;
int ns_status;
int nr_status;
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == LAP_MAGIC, return -1;);
-
- switch( event) {
- case RECV_RR_RSP:
- DEBUG( 4, __FUNCTION__ "(), RECV_RR_FRAME: "
- "Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n",
- self->retry_count, info->nr, self->va, self->vs,
- self->vr);
-
- ASSERT( info != NULL, return -1;);
-
- /*
- * If you get a RR, the remote isn't busy anymore,
- * no matter what the NR
- */
- self->remote_busy = FALSE;
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == LAP_MAGIC, return -1;);
- /*
- * Nr as expected?
- */
- ret = irlap_validate_nr_received( self, info->nr);
- if ( ret == NR_EXPECTED) {
- /* Stop final timer */
- del_timer( &self->final_timer);
-
- /* Update Nr received */
- irlap_update_nr_received( self, info->nr);
-
- /*
- * Got expected NR, so reset the retry_count. This
- * is not done by the IrLAP standard , which is
- * strange! DB.
- */
- self->retry_count = 0;
- irlap_wait_min_turn_around( self, &self->qos_tx);
-
- /* Start poll timer */
- irlap_start_poll_timer( self, self->poll_timeout);
-
- irlap_next_state( self, LAP_XMIT_P);
- } else if ( ret == NR_UNEXPECTED) {
- ASSERT( info != NULL, return -1;);
- /*
- * Unexpected nr!
- */
-
- /* Update Nr received */
- irlap_update_nr_received( self, info->nr);
-
- DEBUG( 4, "RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, "
- "vs=%d, vr=%d\n",
- self->retry_count, info->nr, self->va,
- self->vs, self->vr);
-
- /* Resend rejected frames */
- irlap_resend_rejected_frames( self, CMD_FRAME);
-
- /*
- * Start only if not running, DB
- * TODO: Should this one be here?
- */
- /* if ( !self->final_timer.prev) */
-/* irda_start_timer( FINAL_TIMER, self->final_timeout); */
-
- /* Keep state */
- irlap_next_state( self, LAP_NRM_P);
- } else if ( ret == NR_INVALID) {
- DEBUG( 0, "irlap_state_nrm_p: received RR with "
- "invalid nr !\n");
- del_timer( &self->final_timer);
-
- irlap_next_state( self, LAP_RESET_WAIT);
-
- irlap_disconnect_indication( self,
- LAP_RESET_INDICATION);
- self->xmitflag = TRUE;
- }
- if (skb)
- dev_kfree_skb( skb);
- break;
- case RECV_RNR_FRAME:
- DEBUG( 4, "irlap_state_nrm_p: RECV_RNR_FRAME: Retrans:%d, "
- "nr=%d, va=%d, vs=%d, vr=%d\n",
- self->retry_count, info->nr, self->va, self->vs,
- self->vr);
-
- ASSERT( info != NULL, return -1;);
-
- /* Stop final timer */
- del_timer( &self->final_timer);
- self->remote_busy = TRUE;
-
- /* Update Nr received */
- irlap_update_nr_received( self, info->nr);
-
- /* Start poll timer */
- irlap_start_poll_timer( self, self->poll_timeout);
-
- irlap_next_state( self, LAP_XMIT_P);
-
- dev_kfree_skb( skb);
- break;
- case RECV_I_RSP:
+ switch (event) {
+ case RECV_I_RSP: /* Optimize for the common case */
/* FIXME: must check for remote_busy below */
#ifdef CONFIG_IRDA_FAST_RR
/*
@@ -1009,8 +947,8 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
ASSERT( info != NULL, return -1;);
- ns_status = irlap_validate_ns_received( self, info->ns);
- nr_status = irlap_validate_nr_received( self, info->nr);
+ ns_status = irlap_validate_ns_received(self, info->ns);
+ nr_status = irlap_validate_nr_received(self, info->nr);
/*
* Check for expected I(nformation) frame
@@ -1019,7 +957,7 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
/*
* poll bit cleared?
*/
- if ( !info->pf) {
+ if (!info->pf) {
self->vr = (self->vr + 1) % 8;
/* Update Nr received */
@@ -1028,16 +966,16 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
self->ack_required = TRUE;
/* Keep state, do not move this line */
- irlap_next_state( self, LAP_NRM_P);
+ irlap_next_state(self, LAP_NRM_P);
- irlap_data_indication( self, skb);
+ irlap_data_indication(self, skb);
} else {
- del_timer( &self->final_timer);
+ del_timer(&self->final_timer);
self->vr = (self->vr + 1) % 8;
/* Update Nr received */
- irlap_update_nr_received( self, info->nr);
+ irlap_update_nr_received(self, info->nr);
/*
* Got expected NR, so reset the
@@ -1047,13 +985,17 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
self->retry_count = 0;
self->ack_required = TRUE;
- /* This is the last frame */
- irlap_start_poll_timer( self, self->poll_timeout);
- irlap_wait_min_turn_around( self, &self->qos_tx);
- /* Do not move this line */
- irlap_next_state( self, LAP_XMIT_P);
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ /*
+ * Important to switch state before calling
+ * upper layers
+ */
+ irlap_next_state(self, LAP_XMIT_P);
- irlap_data_indication( self, skb);
+ irlap_data_indication(self, skb);
+
+ /* This is the last frame */
+ irlap_start_poll_timer(self, self->poll_timeout);
}
break;
@@ -1061,8 +1003,7 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
/*
* Unexpected next to send (Ns)
*/
- if (( ns_status == NS_UNEXPECTED) &&
- ( nr_status == NR_EXPECTED))
+ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED))
{
if ( !info->pf) {
irlap_update_nr_received( self, info->nr);
@@ -1095,8 +1036,7 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
/*
* Unexpected next to receive (Nr)
*/
- if (( ns_status == NS_EXPECTED) &&
- ( nr_status == NR_UNEXPECTED))
+ if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED))
{
if ( info->pf) {
self->vr = (self->vr + 1) % 8;
@@ -1139,8 +1079,8 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
* Unexpected next to send (Ns) and next to receive (Nr)
* Not documented by IrLAP!
*/
- if (( ns_status == NS_UNEXPECTED) &&
- ( nr_status == NR_UNEXPECTED))
+ if ((ns_status == NS_UNEXPECTED) &&
+ (nr_status == NR_UNEXPECTED))
{
DEBUG( 4, "IrLAP: unexpected nr and ns!\n");
if ( info->pf) {
@@ -1164,37 +1104,137 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
/*
* Invalid NR or NS
*/
- if (( nr_status == NR_INVALID) || ( ns_status == NS_INVALID)) {
- if ( info->pf) {
- del_timer( &self->final_timer);
+ if ((nr_status == NR_INVALID) || (ns_status == NS_INVALID)) {
+ if (info->pf) {
+ del_timer(&self->final_timer);
- irlap_next_state( self, LAP_RESET_WAIT);
+ irlap_next_state(self, LAP_RESET_WAIT);
- irlap_disconnect_indication( self, LAP_RESET_INDICATION);
+ irlap_disconnect_indication(self, LAP_RESET_INDICATION);
self->xmitflag = TRUE;
} else {
- del_timer( &self->final_timer);
+ del_timer(&self->final_timer);
- irlap_disconnect_indication( self, LAP_RESET_INDICATION);
+ irlap_disconnect_indication(self, LAP_RESET_INDICATION);
self->xmitflag = FALSE;
}
break;
}
- DEBUG( 0, "irlap_state_nrm_p: Not implemented!\n");
- DEBUG( 0, "event=%s, ns_status=%d, nr_status=%d\n",
- irlap_event[ event], ns_status, nr_status);
+ DEBUG(1, __FUNCTION__ "(), Not implemented!\n");
+ DEBUG(1, __FUNCTION__
+ "(), event=%s, ns_status=%d, nr_status=%d\n",
+ irlap_event[ event], ns_status, nr_status);
break;
case RECV_UI_FRAME:
/* poll bit cleared? */
- if ( !info->pf) {
- irlap_unit_data_indication( self, skb);
- irlap_next_state( self, LAP_NRM_P);
+ if (!info->pf) {
+ irlap_unit_data_indication(self, skb);
+ irlap_next_state(self, LAP_NRM_P);
} else {
+ del_timer(&self->final_timer);
+ irlap_unit_data_indication(self, skb);
+ irlap_start_poll_timer(self, self->poll_timeout);
+ }
+ break;
+ case RECV_RR_RSP:
+ DEBUG(4, __FUNCTION__ "(), RECV_RR_FRAME: "
+ "Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n",
+ self->retry_count, info->nr, self->va, self->vs,
+ self->vr);
+
+ ASSERT(info != NULL, return -1;);
+
+ /*
+ * If you get a RR, the remote isn't busy anymore,
+ * no matter what the NR
+ */
+ self->remote_busy = FALSE;
+
+ /*
+ * Nr as expected?
+ */
+ ret = irlap_validate_nr_received(self, info->nr);
+ if (ret == NR_EXPECTED) {
+ /* Stop final timer */
+ del_timer(&self->final_timer);
+
+ /* Update Nr received */
+ irlap_update_nr_received(self, info->nr);
+
+ /*
+ * Got expected NR, so reset the retry_count. This
+			 * is not done by the IrLAP standard, which is
+ * strange! DB.
+ */
+ self->retry_count = 0;
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+
+ irlap_next_state(self, LAP_XMIT_P);
+
+ /* Start poll timer */
+ irlap_start_poll_timer(self, self->poll_timeout);
+ } else if (ret == NR_UNEXPECTED) {
+ ASSERT( info != NULL, return -1;);
+ /*
+ * Unexpected nr!
+ */
+
+ /* Update Nr received */
+ irlap_update_nr_received( self, info->nr);
+
+ DEBUG( 4, "RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, "
+ "vs=%d, vr=%d\n",
+ self->retry_count, info->nr, self->va,
+ self->vs, self->vr);
+
+ /* Resend rejected frames */
+ irlap_resend_rejected_frames( self, CMD_FRAME);
+
+ /*
+ * Start only if not running, DB
+ * TODO: Should this one be here?
+ */
+ /* if ( !self->final_timer.prev) */
+/* irda_start_timer( FINAL_TIMER, self->final_timeout); */
+
+ /* Keep state */
+ irlap_next_state( self, LAP_NRM_P);
+ } else if (ret == NR_INVALID) {
+ DEBUG(1, "irlap_state_nrm_p: received RR with "
+ "invalid nr !\n");
del_timer( &self->final_timer);
- irlap_unit_data_indication( self, skb);
- irlap_start_poll_timer( self, self->poll_timeout);
+
+ irlap_next_state( self, LAP_RESET_WAIT);
+
+ irlap_disconnect_indication( self,
+ LAP_RESET_INDICATION);
+ self->xmitflag = TRUE;
}
+ if (skb)
+ dev_kfree_skb( skb);
+ break;
+ case RECV_RNR_FRAME:
+ DEBUG( 4, "irlap_state_nrm_p: RECV_RNR_FRAME: Retrans:%d, "
+ "nr=%d, va=%d, vs=%d, vr=%d\n",
+ self->retry_count, info->nr, self->va, self->vs,
+ self->vr);
+
+ ASSERT( info != NULL, return -1;);
+
+ /* Stop final timer */
+ del_timer( &self->final_timer);
+ self->remote_busy = TRUE;
+
+ /* Update Nr received */
+ irlap_update_nr_received( self, info->nr);
+
+ irlap_next_state( self, LAP_XMIT_P);
+
+ /* Start poll timer */
+ irlap_start_poll_timer( self, self->poll_timeout);
+
+ dev_kfree_skb( skb);
break;
case RECV_FRMR_RSP:
del_timer( &self->final_timer);
@@ -1257,7 +1297,7 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
}
break;
case RECV_DISC_FRAME: /* FIXME: Check how this is in the standard! */
- DEBUG( 0, __FUNCTION__ "(), RECV_DISC_FRAME()\n");
+ DEBUG(1, __FUNCTION__ "(), RECV_DISC_FRAME()\n");
/* Always switch state before calling upper layers */
irlap_next_state( self, LAP_NDM);
@@ -1277,7 +1317,8 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
break;
default:
- /* DEBUG( 0, "irlap_state_nrm_p: Unknown event"); */
+ DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
+ irlap_event[event]);
ret = -1;
break;
}
@@ -1291,8 +1332,8 @@ static int irlap_state_nrm_p( struct irlap_cb *self, IRLAP_EVENT event,
 * awaiting reset or disconnect request.
*
*/
-int irlap_state_reset_wait( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret = 0;
@@ -1322,8 +1363,8 @@ int irlap_state_reset_wait( struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state( self, LAP_PCLOSE);
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[event]);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
+ irlap_event[event]);
ret = -1;
break;
}
@@ -1337,19 +1378,19 @@ int irlap_state_reset_wait( struct irlap_cb *self, IRLAP_EVENT event,
* reply.
*
*/
-int irlap_state_reset( struct irlap_cb *self, IRLAP_EVENT event,
- struct sk_buff *skb, struct irlap_info *info)
+static int irlap_state_reset( struct irlap_cb *self, IRLAP_EVENT event,
+ struct sk_buff *skb, struct irlap_info *info)
{
int ret = 0;
- DEBUG( 3, __FUNCTION__ "(), event = %s\n", irlap_event[event]);
+ DEBUG(3, __FUNCTION__ "(), event = %s\n", irlap_event[event]);
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == LAP_MAGIC, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == LAP_MAGIC, return -1;);
- switch( event) {
+ switch(event) {
case RECV_DISC_FRAME:
- del_timer( &self->final_timer);
+ del_timer(&self->final_timer);
irlap_apply_default_connection_parameters( self);
@@ -1367,8 +1408,10 @@ int irlap_state_reset( struct irlap_cb *self, IRLAP_EVENT event,
irlap_reset_confirm();
self->remote_busy = FALSE;
- irlap_start_poll_timer( self, self->poll_timeout);
+
irlap_next_state( self, LAP_XMIT_P);
+
+ irlap_start_poll_timer( self, self->poll_timeout);
break;
case FINAL_TIMER_EXPIRED:
if ( self->retry_count < 3) {
@@ -1403,8 +1446,8 @@ int irlap_state_reset( struct irlap_cb *self, IRLAP_EVENT event,
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[ event]);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
+ irlap_event[ event]);
ret = -1;
break;
}
@@ -1468,16 +1511,22 @@ static int irlap_state_xmit_s( struct irlap_cb *self, IRLAP_EVENT event,
DEBUG( 4, "(), window <= 1\n");
irlap_send_data_secondary_final( self, skb);
irlap_next_state( self, LAP_NRM_S);
+
+ /*
+ * Make sure state machine does not try to send
+ * any more frames
+ */
+ ret = -EPROTO;
}
} else {
- DEBUG( 0, __FUNCTION__ "(), Unable to send!\n");
+ DEBUG(1, __FUNCTION__ "(), Unable to send!\n");
skb_queue_head( &self->tx_list, skb);
ret = -EPROTO;
}
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %s\n",
- irlap_event[ event]);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %s\n",
+ irlap_event[ event]);
ret = -EINVAL;
break;
}
@@ -1503,54 +1552,8 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == LAP_MAGIC, return -1;);
- switch( event) {
- case RECV_RR_CMD:
- self->retry_count = 0;
-
- /*
- * Nr as expected?
- */
- nr_status = irlap_validate_nr_received( self, info->nr);
- if ( nr_status == NR_EXPECTED) {
- if (( skb_queue_len( &self->tx_list) > 0) &&
- ( self->window > 0)) {
- self->remote_busy = FALSE;
-
- /* Update Nr received */
- irlap_update_nr_received( self, info->nr);
- del_timer( &self->wd_timer);
-
- irlap_wait_min_turn_around( self, &self->qos_tx);
- irlap_next_state( self, LAP_XMIT_S);
- } else {
- self->remote_busy = FALSE;
- /* Update Nr received */
- irlap_update_nr_received( self, info->nr);
- irlap_wait_min_turn_around( self, &self->qos_tx);
-
- irlap_send_rr_frame( self, RSP_FRAME);
-
- irlap_start_wd_timer( self, self->wd_timeout);
- irlap_next_state( self, LAP_NRM_S);
- }
- } else if ( nr_status == NR_UNEXPECTED) {
- self->remote_busy = FALSE;
- irlap_update_nr_received( self, info->nr);
- irlap_resend_rejected_frames( self, RSP_FRAME);
-
- irlap_start_wd_timer( self, self->wd_timeout);
-
- /* Keep state */
- irlap_next_state( self, LAP_NRM_S);
- } else {
- DEBUG( 0, __FUNCTION__ "(), "
- "invalid nr not implemented!\n");
- }
- if ( skb)
- dev_kfree_skb( skb);
-
- break;
- case RECV_I_CMD:
+ switch(event) {
+ case RECV_I_CMD: /* Optimize for the common case */
/* FIXME: must check for remote_busy below */
DEBUG( 4, __FUNCTION__ "(), event=%s nr=%d, vs=%d, ns=%d, "
"vr=%d, pf=%d\n", irlap_event[event], info->nr,
@@ -1729,9 +1732,54 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
}
}
break;
+ case RECV_RR_CMD:
+ self->retry_count = 0;
+
+ /*
+ * Nr as expected?
+ */
+ nr_status = irlap_validate_nr_received( self, info->nr);
+ if ( nr_status == NR_EXPECTED) {
+ if (( skb_queue_len( &self->tx_list) > 0) &&
+ ( self->window > 0)) {
+ self->remote_busy = FALSE;
+
+ /* Update Nr received */
+ irlap_update_nr_received( self, info->nr);
+ del_timer( &self->wd_timer);
+
+ irlap_wait_min_turn_around( self, &self->qos_tx);
+ irlap_next_state( self, LAP_XMIT_S);
+ } else {
+ self->remote_busy = FALSE;
+ /* Update Nr received */
+ irlap_update_nr_received( self, info->nr);
+ irlap_wait_min_turn_around( self, &self->qos_tx);
+
+ irlap_send_rr_frame( self, RSP_FRAME);
+
+ irlap_start_wd_timer( self, self->wd_timeout);
+ irlap_next_state( self, LAP_NRM_S);
+ }
+ } else if ( nr_status == NR_UNEXPECTED) {
+ self->remote_busy = FALSE;
+ irlap_update_nr_received( self, info->nr);
+ irlap_resend_rejected_frames( self, RSP_FRAME);
+
+ irlap_start_wd_timer( self, self->wd_timeout);
+
+ /* Keep state */
+ irlap_next_state( self, LAP_NRM_S);
+ } else {
+ DEBUG(1, __FUNCTION__ "(), invalid nr not implemented!\n");
+ }
+ if ( skb)
+ dev_kfree_skb( skb);
+
+ break;
case RECV_SNRM_CMD:
del_timer( &self->wd_timer);
- DEBUG( 0, "irlap_state_nrm_s: received SNRM cmd\n");
+ DEBUG(1, __FUNCTION__ "(), received SNRM cmd\n");
irlap_next_state( self, LAP_RESET_CHECK);
irlap_reset_indication( self);
@@ -1743,7 +1791,8 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
* Wait until retry_count * n matches negotiated threshold/
* disconnect time (note 2 in IrLAP p. 82)
*/
- DEBUG( 0, "retry_count = %d\n", self->retry_count);
+ DEBUG(1, __FUNCTION__ "(), retry_count = %d\n",
+ self->retry_count);
if (( self->retry_count < (self->N2/2)) &&
( self->retry_count != self->N1/2)) {
@@ -1798,10 +1847,14 @@ static int irlap_state_nrm_s( struct irlap_cb *self, IRLAP_EVENT event,
irlap_next_state( self, LAP_NRM_S);
#endif
break;
-
+ case RECV_TEST_CMD:
+ skb_pull(skb, sizeof(struct test_frame));
+ irlap_send_test_frame(self, info->daddr, skb);
+ dev_kfree_skb(skb);
+ break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d, (%s)\n",
- event, irlap_event[event]);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n",
+ event, irlap_event[event]);
ret = -1;
break;
}
@@ -1828,7 +1881,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
{
int ret = 0;
- DEBUG( 0, __FUNCTION__ "(), event=%s\n", irlap_event[ event]);
+ DEBUG(1, __FUNCTION__ "(), event=%s\n", irlap_event[ event]);
ASSERT( self != NULL, return -ENODEV;);
ASSERT( self->magic == LAP_MAGIC, return -EBADR;);
@@ -1848,15 +1901,11 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event,
irlap_start_wd_timer( self, WD_TIMEOUT);
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown event %d, (%s)\n",
- event, irlap_event[event]);
+ DEBUG(1, __FUNCTION__ "(), Unknown event %d, (%s)\n",
+ event, irlap_event[event]);
ret = -1;
break;
}
return ret;
}
-
-
-
-
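The NDM and REPLY handlers changed above implement the slotted XID discovery reply: on the first XID command the answering station picks a random slot, replies once the announced slot number reaches its choice, and treats s == 0xff as the final frame that closes the discovery window. The following user-space sketch only illustrates that decision logic; the helper, its slot range, and every name in it are assumptions for illustration and are not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define FINAL_SLOT 0xff			/* s == 0xff marks the last XID frame */

/* Pick a reply slot at random, loosely modelling what the state machine
 * gets from irlap_generate_rand_time_slot() (exact range assumed here). */
static int pick_reply_slot(int total_slots)
{
	return rand() % total_slots;
}

int main(void)
{
	int S = 8;			/* slots announced by the primary */
	int my_slot, frame_sent = 0;

	srand((unsigned) time(NULL));
	my_slot = pick_reply_slot(S);

	for (int s = 0; s <= S; s++) {
		int announced = (s == S) ? FINAL_SLOT : s;

		if (announced == FINAL_SLOT) {
			printf("final XID frame, discovery window closed\n");
			break;
		}
		if (announced >= my_slot && !frame_sent) {
			printf("slot %d: send XID response\n", announced);
			frame_sent = 1;
		} else {
			printf("slot %d: stay quiet\n", announced);
		}
	}
	return 0;
}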
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index d00fd1852..cda78e7f1 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irlap_frame.c
- * Version: 0.3
+ * Version: 0.9
* Description: Build and transmit IrLAP frames
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 10:27:26 1997
- * Modified at: Tue Jan 19 22:58:13 1999
+ * Modified at: Fri Apr 23 09:30:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
 * Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
@@ -22,15 +22,16 @@
*
********************************************************************/
-#include <linux/config.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-
#include <linux/skbuff.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
+#include <linux/irda.h>
+
#include <net/pkt_sched.h>
+#include <net/sock.h>
+
+#include <asm/byteorder.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
@@ -40,8 +41,6 @@
#include <net/irda/irlap_frame.h>
#include <net/irda/qos.h>
-extern __u8 *irlmp_hint_to_service( __u8 *hint);
-
/*
* Function irlap_insert_mtt (self, skb)
*
@@ -49,15 +48,13 @@ extern __u8 *irlmp_hint_to_service( __u8 *hint);
* need to do this since it's per packet relevant information.
*
*/
-__inline__ void irlap_insert_mtt( struct irlap_cb *self, struct sk_buff *skb)
+static inline void irlap_insert_mtt(struct irlap_cb *self, struct sk_buff *skb)
{
struct irlap_skb_cb *cb;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
-
cb = (struct irlap_skb_cb *) skb->cb;
+ cb->magic = LAP_MAGIC;
cb->mtt = self->mtt_required;
/* Reset */
@@ -71,8 +68,30 @@ __inline__ void irlap_insert_mtt( struct irlap_cb *self, struct sk_buff *skb)
/* Reset XBOF's delay (used only for getting min turn time) */
self->xbofs_delay = 0;
+}
+
+/*
+ * Function irlap_queue_xmit (self, skb)
+ *
+ * A little wrapper for dev_queue_xmit, so we can insert some common
+ * code into it.
+ */
+void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb)
+{
+ /* Some common init stuff */
+ skb->dev = self->netdev;
+ skb->h.raw = skb->nh.raw = skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ skb->priority = TC_PRIO_BESTEFFORT;
- DEBUG( 4, __FUNCTION__ "(), using %d xbofs\n", cb->xbofs);
+ /*
+ * Insert MTT (min. turn time) into skb, so that the device driver
+ * knows which MTT to use
+ */
+ irlap_insert_mtt(self, skb);
+
+ dev_queue_xmit(skb);
+ self->stats.tx_packets++;
}
/*
@@ -80,59 +99,47 @@ __inline__ void irlap_insert_mtt( struct irlap_cb *self, struct sk_buff *skb)
*
* Transmits a connect SNRM command frame
*/
-void irlap_send_snrm_frame( struct irlap_cb *self, struct qos_info *qos)
+void irlap_send_snrm_frame(struct irlap_cb *self, struct qos_info *qos)
{
struct sk_buff *skb;
- __u8 *frame;
+ struct snrm_frame *frame;
int len;
- int n;
- DEBUG( 4, "irlap_send_snrm_cmd()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
-
- n = 0;
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
/* Allocate frame */
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0,"irlap_send_snrm_cmd: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
- skb_put( skb, 2);
- frame = skb->data;
+ skb_put(skb, 2);
+ frame = (struct snrm_frame *) skb->data;
- /* Insert address field */
- frame[n] = CMD_FRAME;
- frame[n++] |= (qos) ? CBROADCAST : self->caddr;
+ /* Insert connection address field */
+ if (qos)
+ frame->caddr = CMD_FRAME | CBROADCAST;
+ else
+ frame->caddr = CMD_FRAME | self->caddr;
/* Insert control field */
- frame[n++] = SNRM_CMD | PF_BIT;
+ frame->control = SNRM_CMD | PF_BIT;
/*
 * If we are establishing a connection then insert QoS parameters
*/
if (qos) {
- skb_put( skb, 9); /* 21 left */
- memcpy( frame+n, &self->saddr, 4); n += 4;
- memcpy( frame+n, &self->daddr, 4); n += 4;
+ skb_put(skb, 9); /* 21 left */
+ frame->saddr = cpu_to_le32(self->saddr);
+ frame->daddr = cpu_to_le32(self->daddr);
- frame[n++] = self->caddr;
+ frame->ncaddr = self->caddr;
- /* skb_put( skb, 21); */
- len = irda_insert_qos_negotiation_params( qos, frame+n);
+ len = irda_insert_qos_negotiation_params(qos, frame->params);
/* Should not be dangerous to do this afterwards */
- skb_put( skb, len);
+ skb_put(skb, len);
}
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
-
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
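The address handling above (and in the hunks that follow) replaces raw memcpy() of the 32-bit device addresses with cpu_to_le32()/le32_to_cpu(), making it explicit that these fields travel little-endian on the wire regardless of host byte order. A small user-space illustration of the same idea follows, using glibc's htole32()/le32toh() from <endian.h> instead of the kernel macros; that substitution, the cut-down struct, and the sample addresses are assumptions of this sketch.

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cut-down header carrying the two 32-bit device addresses, mirroring
 * the saddr/daddr fields the patch converts. */
struct addr_hdr {
	uint32_t saddr;			/* little-endian on the wire */
	uint32_t daddr;
};

int main(void)
{
	struct addr_hdr hdr;
	unsigned char wire[sizeof(hdr)];

	/* Sender: convert from host order before the bytes hit the frame. */
	hdr.saddr = htole32(0x12345678u);
	hdr.daddr = htole32(0x9abcdef0u);
	memcpy(wire, &hdr, sizeof(hdr));

	/* Receiver: convert back to host order before using the values. */
	memcpy(&hdr, wire, sizeof(hdr));
	printf("saddr=%08" PRIx32 " daddr=%08" PRIx32 "\n",
	       le32toh(hdr.saddr), le32toh(hdr.daddr));
	return 0;
}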
@@ -141,25 +148,32 @@ void irlap_send_snrm_frame( struct irlap_cb *self, struct qos_info *qos)
* Received SNRM (Set Normal Response Mode) command frame
*
*/
-static void irlap_recv_snrm_cmd( struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info)
+static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info)
{
struct snrm_frame *frame;
- DEBUG( 4, __FUNCTION__ "() <%ld>\n", jiffies);
+ DEBUG(3, __FUNCTION__ "()\n");
- ASSERT( skb != NULL, return;);
- ASSERT( info != NULL, return;);
+ ASSERT(skb != NULL, return;);
+ ASSERT(info != NULL, return;);
- frame = ( struct snrm_frame *) skb->data;
+ frame = (struct snrm_frame *) skb->data;
/* Copy peer device address */
- memcpy( &info->daddr, &frame->saddr, 4);
+ info->daddr = le32_to_cpu(frame->saddr);
/* Copy connection address */
info->caddr = frame->ncaddr;
- irlap_do_event( self, RECV_SNRM_CMD, skb, info);
+ /* Check if connection address has got a valid value */
+ if ((info->caddr == 0x00) || (info->caddr == 0xfe)) {
+ DEBUG(3, __FUNCTION__ "(), invalid connection address!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ irlap_do_event(self, RECV_SNRM_CMD, skb, info);
}
/*
@@ -168,51 +182,41 @@ static void irlap_recv_snrm_cmd( struct irlap_cb *self, struct sk_buff *skb,
* Send UA (Unnumbered Acknowledgement) frame
*
*/
-void irlap_send_ua_response_frame( struct irlap_cb *self,
- struct qos_info *qos)
+void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos)
{
struct sk_buff *skb;
- __u8 *frame;
- int n;
+ struct ua_frame *frame;
int len;
- DEBUG( 4, __FUNCTION__ "() <%ld>\n", jiffies);
+ DEBUG(2, __FUNCTION__ "() <%ld>\n", jiffies);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
skb = NULL;
- n = 0;
/* Allocate frame */
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "Could not allocate an sk_buff of length %d\n", 64);
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
skb_put( skb, 10);
- frame = skb->data;
+ frame = (struct ua_frame *) skb->data;
/* Build UA response */
- frame[n++] = self->caddr;
- frame[n++] = UA_RSP | PF_BIT;
- memcpy( frame+n, &self->saddr, 4); n += 4;
- memcpy( frame+n, &self->daddr, 4); n += 4;
-
+ frame->caddr = self->caddr;
+ frame->control = UA_RSP | PF_BIT;
+
+ frame->saddr = cpu_to_le32(self->saddr);
+ frame->daddr = cpu_to_le32(self->daddr);
+
/* Should we send QoS negotiation parameters? */
- if ( qos) {
- len = irda_insert_qos_negotiation_params( qos, frame+n);
- skb_put( skb, len);
+ if (qos) {
+ len = irda_insert_qos_negotiation_params(qos, frame->params);
+ skb_put(skb, len);
}
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
-
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
@@ -227,32 +231,24 @@ void irlap_send_dm_frame( struct irlap_cb *self)
struct sk_buff *skb = NULL;
__u8 *frame;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
- skb = dev_alloc_skb( 32);
- if (skb == NULL) {
- DEBUG( 0,"irlap_send_disc_frame: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ skb = dev_alloc_skb(32);
+ if (!skb)
return;
- }
skb_put( skb, 2);
frame = skb->data;
- if ( self->state == LAP_NDM)
+ if (self->state == LAP_NDM)
frame[0] = CBROADCAST;
else
frame[0] = self->caddr;
frame[1] = DM_RSP | PF_BIT;
-
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
@@ -261,33 +257,27 @@ void irlap_send_dm_frame( struct irlap_cb *self)
* Send disconnect (DISC) frame
*
*/
-void irlap_send_disc_frame( struct irlap_cb *self)
+void irlap_send_disc_frame(struct irlap_cb *self)
{
struct sk_buff *skb = NULL;
__u8 *frame;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
+ DEBUG(3, __FUNCTION__ "()\n");
- skb = dev_alloc_skb( 32);
- if (skb == NULL) {
- DEBUG( 0,"irlap_send_disc_frame: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+
+ skb = dev_alloc_skb(32);
+ if (!skb)
return;
- }
- skb_put( skb, 2);
+ skb_put(skb, 2);
frame = skb->data;
frame[0] = self->caddr | CMD_FRAME;
frame[1] = DISC_CMD | PF_BIT;
-
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
@@ -296,8 +286,8 @@ void irlap_send_disc_frame( struct irlap_cb *self)
* Build and transmit a XID (eXchange station IDentifier) discovery
* frame.
*/
-void irlap_send_discovery_xid_frame( struct irlap_cb *self, int S, __u8 s,
- __u8 command, DISCOVERY *discovery)
+void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s,
+ __u8 command, discovery_t *discovery)
{
struct sk_buff *skb = NULL;
struct xid_frame *frame;
@@ -309,15 +299,12 @@ void irlap_send_discovery_xid_frame( struct irlap_cb *self, int S, __u8 s,
ASSERT( self->magic == LAP_MAGIC, return;);
ASSERT( discovery != NULL, return;);
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0,"irlap_send_discovery_xid_frame: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
- skb_put( skb, 14);
- frame = ( struct xid_frame *) skb->data;
+ skb_put(skb, 14);
+ frame = (struct xid_frame *) skb->data;
if ( command) {
frame->caddr = CBROADCAST | CMD_FRAME;
@@ -328,14 +315,14 @@ void irlap_send_discovery_xid_frame( struct irlap_cb *self, int S, __u8 s,
}
frame->ident = XID_FORMAT;
- memcpy( &frame->saddr, &self->saddr, 4);
+ frame->saddr = cpu_to_le32(self->saddr);
- if ( command)
- memcpy( &frame->daddr, &bcast, 4);
+ if (command)
+ frame->daddr = cpu_to_le32(bcast);
else
- memcpy( &frame->daddr, &self->daddr, 4);
+ frame->daddr = cpu_to_le32(discovery->daddr);
- switch( S) {
+ switch(S) {
case 1:
frame->flags = 0x00;
break;
@@ -364,15 +351,15 @@ void irlap_send_discovery_xid_frame( struct irlap_cb *self, int S, __u8 s,
if ( !command || ( frame->slotnr == 0xff)) {
int i;
- if( discovery->hint[0] & HINT_EXTENSION)
+ if (discovery->hints.byte[0] & HINT_EXTENSION)
skb_put( skb, 3+discovery->info_len);
else
skb_put( skb, 2+discovery->info_len);
i = 0;
- frame->discovery_info[i++] = discovery->hint[0];
- if( discovery->hint[0] & HINT_EXTENSION)
- frame->discovery_info[i++] = discovery->hint[1];
+ frame->discovery_info[i++] = discovery->hints.byte[0];
+ if(discovery->hints.byte[0] & HINT_EXTENSION)
+ frame->discovery_info[i++] = discovery->hints.byte[1];
frame->discovery_info[i++] = discovery->charset;
@@ -382,15 +369,9 @@ void irlap_send_discovery_xid_frame( struct irlap_cb *self, int S, __u8 s,
discovery->info_len);
}
-
ASSERT( self->netdev != NULL, return;);
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
-
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
@@ -399,47 +380,48 @@ void irlap_send_discovery_xid_frame( struct irlap_cb *self, int S, __u8 s,
* Received a XID discovery response
*
*/
-static void irlap_recv_discovery_xid_rsp( struct irlap_cb *self,
- struct sk_buff *skb,
- struct irlap_info *info)
+static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info)
{
struct xid_frame *xid;
- DISCOVERY *discovery = NULL;
+ discovery_t *discovery = NULL;
char *text;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
- ASSERT( info != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
+ ASSERT(info != NULL, return;);
- if (( discovery = kmalloc( sizeof( DISCOVERY), GFP_ATOMIC)) == NULL) {
- DEBUG( 0, __FUNCTION__ "(), kmalloc failed!\n");
+ if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
+ DEBUG(0, __FUNCTION__ "(), kmalloc failed!\n");
return;
}
- memset( discovery, 0, sizeof( DISCOVERY));
+ memset(discovery, 0, sizeof(discovery_t));
xid = (struct xid_frame *) skb->data;
/*
* Copy peer device address and set the source address
*/
- memcpy( &info->daddr, &xid->saddr, 4);
+ info->daddr = le32_to_cpu(xid->saddr);
discovery->daddr = info->daddr;
discovery->saddr = self->saddr;
+ discovery->timestamp = jiffies;
- DEBUG( 4, __FUNCTION__ "(), daddr=%08x\n", discovery->daddr);
+ DEBUG(4, __FUNCTION__ "(), daddr=%08x\n", discovery->daddr);
/* Get info returned from peer */
- discovery->hint[0] = xid->discovery_info[0];
- if ( xid->discovery_info[0] & HINT_EXTENSION) {
- DEBUG( 4, "EXTENSION\n");
- discovery->hint[1] = xid->discovery_info[1];
+ discovery->hints.byte[0] = xid->discovery_info[0];
+ if (xid->discovery_info[0] & HINT_EXTENSION) {
+ DEBUG(4, "EXTENSION\n");
+ discovery->hints.byte[1] = xid->discovery_info[1];
discovery->charset = xid->discovery_info[2];
text = (char *) &xid->discovery_info[3];
} else {
- discovery->hint[1] = 0;
+ discovery->hints.byte[1] = 0;
discovery->charset = xid->discovery_info[1];
text = (char *) &xid->discovery_info[2];
}
@@ -448,11 +430,11 @@ static void irlap_recv_discovery_xid_rsp( struct irlap_cb *self,
* FCS bytes resides.
*/
skb->data[skb->len] = '\0';
- strcpy( discovery->info, text);
+ strcpy(discovery->info, text);
info->discovery = discovery;
- irlap_do_event( self, RECV_DISCOVERY_XID_RSP, skb, info);
+ irlap_do_event(self, RECV_DISCOVERY_XID_RSP, skb, info);
}
/*
@@ -466,7 +448,7 @@ static void irlap_recv_discovery_xid_cmd( struct irlap_cb *self,
struct irlap_info *info)
{
struct xid_frame *xid;
- DISCOVERY *discovery = NULL;
+ discovery_t *discovery = NULL;
char *text;
DEBUG( 4, __FUNCTION__ "()\n");
@@ -479,7 +461,7 @@ static void irlap_recv_discovery_xid_cmd( struct irlap_cb *self,
xid = (struct xid_frame *) skb->data;
/* Copy peer device address */
- memcpy( &info->daddr, &xid->saddr, 4);
+ info->daddr = le32_to_cpu(xid->saddr);
switch ( xid->flags & 0x03) {
case 0x00:
@@ -507,25 +489,24 @@ static void irlap_recv_discovery_xid_cmd( struct irlap_cb *self,
/*
* We now have some discovery info to deliver!
*/
- discovery = kmalloc( sizeof( DISCOVERY), GFP_ATOMIC);
- if ( !discovery) {
- DEBUG( 0, __FUNCTION__ "(), kmalloc failed!\n");
+ discovery = kmalloc( sizeof(discovery_t), GFP_ATOMIC);
+ if (!discovery)
return;
- }
+
discovery->daddr = info->daddr;
discovery->saddr = self->saddr;
+ discovery->timestamp = jiffies;
DEBUG( 4, __FUNCTION__ "(), daddr=%08x\n",
discovery->daddr);
- discovery->hint[0] = xid->discovery_info[0];
+ discovery->hints.byte[0] = xid->discovery_info[0];
if ( xid->discovery_info[0] & HINT_EXTENSION) {
- DEBUG( 4, "EXTENSION\n");
- discovery->hint[1] = xid->discovery_info[1];
+ discovery->hints.byte[1] = xid->discovery_info[1];
discovery->charset = xid->discovery_info[2];
text = (char *) &xid->discovery_info[3];
} else {
- discovery->hint[1] = 0;
+ discovery->hints.byte[1] = 0;
discovery->charset = xid->discovery_info[1];
text = (char *) &xid->discovery_info[2];
}
@@ -540,9 +521,6 @@ static void irlap_recv_discovery_xid_cmd( struct irlap_cb *self,
} else
info->discovery = NULL;
- DEBUG( 4, __FUNCTION__"(), s=%d, S=%d <%ld>\n",
- info->s, info->S, jiffies);
-
irlap_do_event( self, RECV_DISCOVERY_XID_CMD, skb, info);
}
@@ -552,103 +530,43 @@ static void irlap_recv_discovery_xid_cmd( struct irlap_cb *self,
* Build and transmit RR (Receive Ready) frame. Notice that it is currently
* only possible to send RR frames with the poll bit set.
*/
-void irlap_send_rr_frame( struct irlap_cb *self, int command)
+void irlap_send_rr_frame(struct irlap_cb *self, int command)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
__u8 *frame;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
-#ifdef CONFIG_IRDA_RECYCLE_RR
- if ( self->recycle_rr_skb) {
- DEBUG( 4, __FUNCTION__ "(), recycling skb!\n");
- skb = self->recycle_rr_skb;
- self->recycle_rr_skb = NULL;
- }
-#endif
- if ( !skb) {
- skb = dev_alloc_skb( 32);
- if (skb == NULL) {
- printk( KERN_WARNING "irlap_send_rr_frame: "
- "Could not allocate an skb of length %d\n", 32);
- return;
- }
- skb_put( skb, 2);
- }
- ASSERT( skb->len == 2, return;);
-
- frame = skb->data;
+ skb = dev_alloc_skb(32);
+ if (!skb)
+ return;
+
+ frame = skb_put(skb, 2);
frame[0] = self->caddr;
frame[0] |= (command) ? CMD_FRAME : 0;
frame[1] = RR | PF_BIT | (self->vr << 5);
- DEBUG( 4, __FUNCTION__ "(), vr=%d, %ld\n", self->vr, jiffies);
-
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
-
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
* Function irlap_recv_rr_frame (skb, info)
*
- * Received RR (Receive Ready) frame from peer station
- *
+ * Received RR (Receive Ready) frame from peer station, no harm in
+ *	making it inline since it's called only from one single place
+ * (irlap_input).
*/
-static void irlap_recv_rr_frame( struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info, int command)
+static inline void irlap_recv_rr_frame(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info, int command)
{
- __u8 *frame;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- frame = skb->data;
- info->nr = frame[1] >> 5;
-
- DEBUG( 4, __FUNCTION__ "(), nr=%d, %ld\n", info->nr, jiffies);
-
- /*
- * Make sure the state-machine is in the right state for receiving,
- * if not, then we just discard the received frame for now!
- * TODO: check if we should queue this frame, or make tty tell that
- * it is receiving frames until the frame is delivered instead of
- * until it is outside a frame.
- */
-#if 0
- if (( self->state != LAP_NRM_P) && ( self->state != LAP_NRM_S)) {
- DEBUG( 0, __FUNCTION__ "(), Wrong state, dropping frame!\n");
- dev_kfree_skb( skb);
- return;
- }
-#endif
+ info->nr = skb->data[1] >> 5;
-#ifdef CONFIG_IRDA_RECYCLE_RR
- /* Only recycle one RR frame */
- if ( self->recycle_rr_skb == NULL) {
-
- /* Keep this skb, so it can be reused */
- self->recycle_rr_skb = skb;
-
- /*
- * Set skb to NULL, so that the state machine will not
- * try to deallocate it.
- */
- skb = NULL;
- }
-#endif
/* Check if this is a command or a response frame */
- if ( command)
- irlap_do_event( self, RECV_RR_CMD, skb, info);
+ if (command)
+ irlap_do_event(self, RECV_RR_CMD, skb, info);
else
- irlap_do_event( self, RECV_RR_RSP, skb, info);
+ irlap_do_event(self, RECV_RR_RSP, skb, info);
}
void irlap_send_frmr_frame( struct irlap_cb *self, int command)
@@ -660,11 +578,8 @@ void irlap_send_frmr_frame( struct irlap_cb *self, int command)
ASSERT( self->magic == LAP_MAGIC, return;);
skb = dev_alloc_skb( 32);
- if (skb == NULL) {
- printk( KERN_WARNING "irlap_send_frmr_frame: "
- "Could not allocate an sk_buff of length %d\n", 32);
+ if (!skb)
return;
- }
skb_put( skb, 2);
frame = skb->data;
@@ -680,12 +595,7 @@ void irlap_send_frmr_frame( struct irlap_cb *self, int command)
DEBUG( 4, __FUNCTION__ "(), vr=%d, %ld\n",self->vr, jiffies);
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
-
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
@@ -716,15 +626,15 @@ static void irlap_recv_rnr_frame( struct irlap_cb *self, struct sk_buff *skb,
* Received UA (Unnumbered Acknowledgement) frame
*
*/
-static void irlap_recv_ua_frame( struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info)
+static void irlap_recv_ua_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "(), <%ld>\n", jiffies);
- ASSERT( skb != NULL, return;);
- ASSERT( info != NULL, return;);
+ ASSERT(skb != NULL, return;);
+ ASSERT(info != NULL, return;);
- irlap_do_event( self, RECV_UA_RSP, skb, info);
+ irlap_do_event(self, RECV_UA_RSP, skb, info);
}
/*
@@ -737,16 +647,7 @@ void irlap_send_data_primary( struct irlap_cb *self, struct sk_buff *skb)
{
struct sk_buff *tx_skb;
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- /* Initialize variables */
- tx_skb = NULL;
-
- if ( skb->data[1] == I_FRAME) {
+ if (skb->data[1] == I_FRAME) {
/*
* Insert frame sequence number (Vs) in control field before
@@ -754,11 +655,10 @@ void irlap_send_data_primary( struct irlap_cb *self, struct sk_buff *skb)
*/
skb->data[1] = I_FRAME | (self->vs << 1);
- /* * Copy buffer */
- tx_skb = skb_clone( skb, GFP_ATOMIC);
- /* tx_skb = skb_copy( skb, GFP_ATOMIC); */
- if ( tx_skb == NULL) {
- dev_kfree_skb( skb);
+ /* Copy buffer */
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ if (tx_skb == NULL) {
+ dev_kfree_skb(skb);
return;
}
@@ -766,12 +666,12 @@ void irlap_send_data_primary( struct irlap_cb *self, struct sk_buff *skb)
* make sure the skb->sk accounting of memory usage is sane
*/
if (skb->sk != NULL)
- skb_set_owner_w( tx_skb, skb->sk);
+ skb_set_owner_w(tx_skb, skb->sk);
/*
* Insert frame in store, in case of retransmissions
*/
- skb_queue_tail( &self->wx_list, skb);
+ skb_queue_tail(&self->wx_list, skb);
self->vs = (self->vs + 1) % 8;
self->ack_required = FALSE;
@@ -780,7 +680,7 @@ void irlap_send_data_primary( struct irlap_cb *self, struct sk_buff *skb)
irlap_send_i_frame( self, tx_skb, CMD_FRAME);
} else {
DEBUG( 4, __FUNCTION__ "(), sending unreliable frame\n");
- irlap_send_ui_frame( self, skb, CMD_FRAME);
+ irlap_send_ui_frame(self, skb, CMD_FRAME);
self->window -= 1;
}
}
@@ -793,17 +693,8 @@ void irlap_send_data_primary_poll( struct irlap_cb *self, struct sk_buff *skb)
{
struct sk_buff *tx_skb;
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- /* Initialize variables */
- tx_skb = NULL;
-
/* Is this reliable or unreliable data? */
- if ( skb->data[1] == I_FRAME) {
+ if (skb->data[1] == I_FRAME) {
/*
* Insert frame sequence number (Vs) in control field before
@@ -812,10 +703,9 @@ void irlap_send_data_primary_poll( struct irlap_cb *self, struct sk_buff *skb)
skb->data[1] = I_FRAME | (self->vs << 1);
/* Copy buffer */
- tx_skb = skb_clone( skb, GFP_ATOMIC);
- /* tx_skb = skb_copy( skb, GFP_ATOMIC); */
- if ( tx_skb == NULL) {
- dev_kfree_skb( skb);
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ if (tx_skb == NULL) {
+ dev_kfree_skb(skb);
return;
}
@@ -823,20 +713,20 @@ void irlap_send_data_primary_poll( struct irlap_cb *self, struct sk_buff *skb)
* make sure the skb->sk accounting of memory usage is sane
*/
if (skb->sk != NULL)
- skb_set_owner_w( tx_skb, skb->sk);
+ skb_set_owner_w(tx_skb, skb->sk);
/*
* Insert frame in store, in case of retransmissions
*/
- skb_queue_tail( &self->wx_list, skb);
+ skb_queue_tail(&self->wx_list, skb);
/*
* Set poll bit if necessary. We do this to the copied
	 * skb, since retransmitted frames need to set or clear the poll
- * bit depending on when * they are sent.
+ * bit depending on when they are sent.
*/
/* Stop P timer */
- del_timer( &self->poll_timer);
+ del_timer(&self->poll_timer);
tx_skb->data[1] |= PF_BIT;
@@ -844,24 +734,24 @@ void irlap_send_data_primary_poll( struct irlap_cb *self, struct sk_buff *skb)
self->ack_required = FALSE;
self->window = self->window_size;
- irlap_start_final_timer( self, self->final_timeout);
+ irlap_start_final_timer(self, self->final_timeout);
- irlap_send_i_frame( self, tx_skb, CMD_FRAME);
+ irlap_send_i_frame(self, tx_skb, CMD_FRAME);
} else {
- DEBUG( 4, __FUNCTION__ "(), sending unreliable frame\n");
+ DEBUG(4, __FUNCTION__ "(), sending unreliable frame\n");
- del_timer( &self->poll_timer);
+ del_timer(&self->poll_timer);
- if ( self->ack_required) {
- irlap_send_ui_frame( self, skb, CMD_FRAME);
- irlap_send_rr_frame( self, CMD_FRAME);
+ if (self->ack_required) {
+ irlap_send_ui_frame(self, skb, CMD_FRAME);
+ irlap_send_rr_frame(self, CMD_FRAME);
self->ack_required = FALSE;
} else {
skb->data[1] |= PF_BIT;
- irlap_send_ui_frame( self, skb, CMD_FRAME);
+ irlap_send_ui_frame(self, skb, CMD_FRAME);
}
self->window = self->window_size;
- irlap_start_final_timer( self, self->final_timeout);
+ irlap_start_final_timer(self, self->final_timeout);
}
}
@@ -871,8 +761,8 @@ void irlap_send_data_primary_poll( struct irlap_cb *self, struct sk_buff *skb)
* Send I(nformation) frame as secondary with final bit set
*
*/
-void irlap_send_data_secondary_final( struct irlap_cb *self,
- struct sk_buff *skb)
+void irlap_send_data_secondary_final(struct irlap_cb *self,
+ struct sk_buff *skb)
{
struct sk_buff *tx_skb = NULL;
@@ -890,7 +780,6 @@ void irlap_send_data_secondary_final( struct irlap_cb *self,
skb->data[1] = I_FRAME | (self->vs << 1);
tx_skb = skb_clone( skb, GFP_ATOMIC);
- /* tx_skb = skb_copy( skb, GFP_ATOMIC); */
if ( tx_skb == NULL) {
dev_kfree_skb( skb);
return;
@@ -950,7 +839,6 @@ void irlap_send_data_secondary( struct irlap_cb *self, struct sk_buff *skb)
skb->data[1] = I_FRAME | (self->vs << 1);
tx_skb = skb_clone( skb, GFP_ATOMIC);
- /* tx_skb = skb_copy( skb, GFP_ATOMIC); */
if ( tx_skb == NULL) {
dev_kfree_skb( skb);
return;
@@ -991,8 +879,7 @@ void irlap_resend_rejected_frames( struct irlap_cb *self, int command)
ASSERT( self != NULL, return;);
ASSERT( self->magic == LAP_MAGIC, return;);
- DEBUG( 4, __FUNCTION__ "(), retry_count=%d\n",
- self->retry_count);
+ DEBUG(2, __FUNCTION__ "(), retry_count=%d\n", self->retry_count);
/* Initialize variables */
skb = tx_skb = NULL;
@@ -1005,8 +892,11 @@ void irlap_resend_rejected_frames( struct irlap_cb *self, int command)
while ( skb != NULL) {
irlap_wait_min_turn_around( self, &self->qos_tx);
- tx_skb = skb_clone( skb, GFP_ATOMIC);
- /* tx_skb = skb_copy( skb, GFP_ATOMIC); */
+ /* We copy the skb to be retransmitted since we will have to
+ * modify it. Cloning will confuse packet sniffers
+ */
+ /* tx_skb = skb_clone( skb, GFP_ATOMIC); */
+ tx_skb = skb_copy(skb, GFP_ATOMIC);
if ( tx_skb == NULL) {
/* Unlink tx_skb from list */
tx_skb->next = tx_skb->prev = NULL;
@@ -1064,12 +954,12 @@ void irlap_resend_rejected_frames( struct irlap_cb *self, int command)
* If send window > 1 then send frame with pf
* bit cleared
*/
- if (( self->window > 1) &&
- skb_queue_len( &self->tx_list) > 0)
+ if ((self->window > 1) &&
+ skb_queue_len(&self->tx_list) > 0)
{
- irlap_send_data_primary( self, skb);
+ irlap_send_data_primary(self, skb);
} else {
- irlap_send_data_primary_poll( self, skb);
+ irlap_send_data_primary_poll(self, skb);
}
}
}
@@ -1081,8 +971,8 @@ void irlap_resend_rejected_frames( struct irlap_cb *self, int command)
 * Construct and transmit an Unnumbered Information (UI) frame
*
*/
-void irlap_send_ui_frame( struct irlap_cb *self, struct sk_buff *skb,
- int command)
+void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
+ int command)
{
__u8 *frame;
@@ -1098,12 +988,7 @@ void irlap_send_ui_frame( struct irlap_cb *self, struct sk_buff *skb,
frame[0] = self->caddr;
frame[0] |= (command) ? CMD_FRAME : 0;
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
-
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
@@ -1111,8 +996,8 @@ void irlap_send_ui_frame( struct irlap_cb *self, struct sk_buff *skb,
*
 * Construct and transmit Information (I) frame
*/
-void irlap_send_i_frame( struct irlap_cb *self, struct sk_buff *skb,
- int command)
+void irlap_send_i_frame(struct irlap_cb *self, struct sk_buff *skb,
+ int command)
{
__u8 *frame;
@@ -1128,69 +1013,41 @@ void irlap_send_i_frame( struct irlap_cb *self, struct sk_buff *skb,
/* Insert next to receive (Vr) */
frame[1] |= (self->vr << 5); /* insert nr */
+
#if 0
{
- int vr, vs, pf;
-
- /* Chech contents of various fields */
- vr = frame[1] >> 5;
- vs = (frame[1] >> 1) & 0x07;
- pf = (frame[1] >> 4) & 0x01;
-
- DEBUG( 0, __FUNCTION__ "(), vs=%d, vr=%d, p=%d, %ld\n",
- vs, vr, pf, jiffies);
+ int ns;
+ ns = (frame[1] >> 1) & 0x07; /* Next to send */
+
+ DEBUG(0, __FUNCTION__ "(), ns=%d\n", ns);
}
-#endif
- skb->dev = self->netdev;
- skb->h.raw = skb->data;
- irlap_insert_mtt( self, skb);
+#endif
- dev_queue_xmit( skb);
- self->stats.tx_packets++;
+ irlap_queue_xmit(self, skb);
}
/*
* Function irlap_recv_i_frame (skb, frame)
*
- * Receive and parse an I (Information) frame
- *
+ * Receive and parse an I (Information) frame, no harm in making it inline
+ * since it's called only from one single place (irlap_input).
*/
-static void irlap_recv_i_frame( struct irlap_cb *self, struct sk_buff *skb,
- struct irlap_info *info, int command)
+static inline void irlap_recv_i_frame(struct irlap_cb *self,
+ struct sk_buff *skb,
+ struct irlap_info *info, int command)
{
- __u8 *frame;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- frame = skb->data;
-
- info->nr = frame[1] >> 5; /* Next to receive */
- info->pf = frame[1] & PF_BIT; /* Final bit */
- info->ns = (frame[1] >> 1) & 0x07; /* Next to send */
-
- DEBUG( 4, __FUNCTION__"(), ns=%d, nr=%d, pf=%d, %ld\n",
- info->ns, info->nr, info->pf>>4, jiffies);
+ info->nr = skb->data[1] >> 5; /* Next to receive */
+ info->pf = skb->data[1] & PF_BIT; /* Final bit */
+ info->ns = (skb->data[1] >> 1) & 0x07; /* Next to send */
- /*
- * Make sure the state-machine is in the right state for receiving,
- * if not, then we just discard the received frame for now!
- * TODO: check if we should queue this frame, or make tty tell that
- * it is receiving frames until the frame is delivered instead of
- * until it is outside a frame.
- */
- if (( self->state != LAP_NRM_P) && ( self->state != LAP_NRM_S)) {
- DEBUG( 0, __FUNCTION__ "(), Wrong state, dropping frame!\n");
- dev_kfree_skb( skb);
- return;
- }
+ DEBUG(4, __FUNCTION__"(), ns=%d, nr=%d, pf=%d, %ld\n",
+ info->ns, info->nr, info->pf>>4, jiffies);
/* Check if this is a command or a response frame */
- if ( command)
- irlap_do_event( self, RECV_I_CMD, skb, info);
+ if (command)
+ irlap_do_event(self, RECV_I_CMD, skb, info);
else
- irlap_do_event( self, RECV_I_RSP, skb, info);
+ irlap_do_event(self, RECV_I_RSP, skb, info);
}
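As an aside for readers following the bit twiddling above: the control octet of an IrLAP frame carries Nr in bits 5-7, the poll/final flag in bit 4 and Ns in bits 1-3, while the two low bits distinguish I-, S- and U-frames. The fragment below is a stand-alone, user-space sketch of that unpacking; the PF_BIT value and the sample octet are assumptions for illustration, not taken from the kernel headers.

#include <stdio.h>

#define PF_BIT 0x10 /* assumed mask for the poll/final bit (bit 4) */

int main(void)
{
	unsigned char control = 0x74; /* sample I-frame control octet */

	if (~control & 0x01) {
		/* I-frame: bit 0 is 0 */
		int nr = control >> 5;          /* next to receive */
		int ns = (control >> 1) & 0x07; /* next to send */
		int pf = (control & PF_BIT) ? 1 : 0;

		printf("I-frame: ns=%d, nr=%d, pf=%d\n", ns, nr, pf);
	} else if (~control & 0x02) {
		/* S-frame: bit 0 is 1, bit 1 is 0 */
		printf("S-frame, type=%#x\n", control & 0x0f);
	} else {
		/* U(nnumbered)/control frame */
		printf("U-frame, control=%#x\n", control);
	}
	return 0;
}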
/*
@@ -1264,29 +1121,101 @@ static void irlap_recv_frmr_frame( struct irlap_cb *self, struct sk_buff *skb,
}
/*
- * Function irlap_input (skb)
+ * Function irlap_send_test_frame (self, daddr)
+ *
+ * Send a test frame response
*
- * Called when a frame is received. Dispatches the right receive function
- * for processing of the frame.
*/
-int irlap_input( struct sk_buff *skb, struct device *netdev,
- struct packet_type *ptype)
+void irlap_send_test_frame(struct irlap_cb *self, __u32 daddr,
+ struct sk_buff *cmd)
+{
+ struct sk_buff *skb;
+ struct test_frame *frame;
+
+ skb = dev_alloc_skb(32);
+ if (!skb)
+ return;
+
+ skb_put(skb, sizeof(struct test_frame));
+
+ frame = (struct test_frame *) skb->data;
+
+ /* Build header */
+ if (self->state == LAP_NDM)
+ frame->caddr = CBROADCAST; /* Send response */
+ else
+ frame->caddr = self->caddr;
+
+ frame->control = TEST_RSP;
+
+ /* Insert the swapped addresses */
+ frame->saddr = cpu_to_le32(self->saddr);
+ frame->daddr = cpu_to_le32(daddr);
+
+ /* Copy info */
+ skb_put(skb, cmd->len);
+ memcpy(frame->info, cmd->data, cmd->len);
+
+ /* Return to sender */
+ irlap_wait_min_turn_around(self, &self->qos_tx);
+ irlap_queue_xmit(self, skb);
+}
+
+/*
+ * Function irlap_recv_test_frame (self, skb)
+ *
+ * Receive a test frame
+ *
+ */
+void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb,
+ struct irlap_info *info, int command)
+{
+ struct test_frame *frame;
+
+ DEBUG(0, __FUNCTION__ "()\n");
+
+ if (skb->len < sizeof(struct test_frame)) {
+ DEBUG(0, __FUNCTION__ "() test frame too short!\n");
+ return;
+ }
+
+ frame = (struct test_frame *) skb->data;
+
+ /* Read and swap addresses */
+ info->daddr = le32_to_cpu(frame->saddr);
+ info->saddr = le32_to_cpu(frame->daddr);
+
+ if (command)
+ irlap_do_event(self, RECV_TEST_CMD, skb, info);
+ else
+ irlap_do_event(self, RECV_TEST_RSP, skb, info);
+}
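The cpu_to_le32()/le32_to_cpu() pairing above reflects that IrLAP carries the 32-bit device addresses little-endian on the wire, and that a TEST response mirrors them: our source address is written where the receiver will read its destination address, and vice versa. The user-space sketch below only illustrates that pack/unpack symmetry; the field layout is a simplification, not the real struct test_frame.

#include <stdio.h>
#include <stdint.h>

/* Write a host-order 32 bit value little-endian, as it goes on the wire */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
	       ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

int main(void)
{
	uint8_t addr_fields[8];
	uint32_t my_saddr = 0x12345678;   /* responder's own address */
	uint32_t peer_daddr = 0x9abcdef0; /* address we are replying to */

	/* Sender side: "return to sender" */
	put_le32(addr_fields, my_saddr);       /* saddr field */
	put_le32(addr_fields + 4, peer_daddr); /* daddr field */

	/* Receiver side: the fields come back swapped */
	printf("info.daddr=%08x, info.saddr=%08x\n",
	       get_le32(addr_fields), get_le32(addr_fields + 4));
	return 0;
}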
+
+/*
+ * Function irlap_driver_rcv (skb, netdev, ptype)
+ *
+ * Called when a frame is received. Dispatches the right receive function
+ * for processing of the frame.
+ *
+ */
+int irlap_driver_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *ptype)
{
struct irlap_info info;
struct irlap_cb *self;
struct irda_device *idev;
__u8 *frame;
- int i, command;
+ int command;
__u8 control;
- idev = ( struct irda_device *) netdev->priv;
+ idev = (struct irda_device *) dev->priv;
ASSERT( idev != NULL, return -1;);
self = idev->irlap;
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == LAP_MAGIC, return -1;);
- ASSERT(( skb != NULL) && (skb->len > 1), return -1;);
+ ASSERT( skb->len > 1, return -1;);
frame = skb->data;
@@ -1301,24 +1230,18 @@ int irlap_input( struct sk_buff *skb, struct device *netdev,
/*
 * First check if this frame is addressed to us
*/
- if (( info.caddr != self->caddr) && ( info.caddr != CBROADCAST)) {
-
- DEBUG( 0, __FUNCTION__ "(), Received frame is not for us!\n");
- for(i=0; i<(skb->len < 15?skb->len:15);i++) {
- printk( "%02x ", frame[i]);
- }
- printk("\n");
-
- dev_kfree_skb( skb);
+ if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) {
+ DEBUG(2, __FUNCTION__ "(), Received frame is not for us!\n");
+ dev_kfree_skb(skb);
return 0;
}
/*
* Optimize for the common case and check if the frame is an
* I(nformation) frame. Only I-frames have bit 0 set to 0
*/
- if( ~control & 0x01) {
- irlap_recv_i_frame( self, skb, &info, command);
+ if(~control & 0x01) {
+ irlap_recv_i_frame(self, skb, &info, command);
self->stats.rx_packets++;
return 0;
}
@@ -1326,19 +1249,17 @@ int irlap_input( struct sk_buff *skb, struct device *netdev,
 * We now check if the frame is an S(upervisory) frame. Only
* S-frames have bit 0 set to 1 and bit 1 set to 0
*/
- if ( ~control & 0x02) {
+ if (~control & 0x02) {
/*
* Received S(upervisory) frame, check which frame type it is
* only the first nibble is of interest
*/
- switch( control & 0x0f) {
+ switch(control & 0x0f) {
case RR:
irlap_recv_rr_frame( self, skb, &info, command);
self->stats.rx_packets++;
break;
case RNR:
- DEBUG( 4, "*** RNR frame received! pf = %d ***\n",
- info.pf >> 4);
irlap_recv_rnr_frame( self, skb, &info);
self->stats.rx_packets++;
break;
@@ -1358,37 +1279,32 @@ int irlap_input( struct sk_buff *skb, struct device *netdev,
/*
* This must be a C(ontrol) frame
*/
- switch( control) {
+ switch(control) {
case XID_RSP:
- DEBUG( 4, "XID rsp frame received!\n");
- irlap_recv_discovery_xid_rsp( self, skb, &info);
+ irlap_recv_discovery_xid_rsp(self, skb, &info);
break;
case XID_CMD:
- DEBUG( 4, "XID cmd frame received!\n");
- irlap_recv_discovery_xid_cmd( self, skb, &info);
+ irlap_recv_discovery_xid_cmd(self, skb, &info);
break;
case SNRM_CMD:
- DEBUG( 4, "SNRM frame received!\n");
- irlap_recv_snrm_cmd( self, skb, &info);
+ irlap_recv_snrm_cmd(self, skb, &info);
break;
case DM_RSP:
DEBUG( 0, "DM rsp frame received!\n");
- irlap_next_state( self, LAP_NDM);
+ irlap_next_state(self, LAP_NDM);
break;
case DISC_CMD:
- DEBUG( 0, "DISC cmd frame received!\n");
- irlap_do_event( self, RECV_DISC_FRAME, skb, &info);
+ irlap_do_event(self, RECV_DISC_FRAME, skb, &info);
break;
case TEST_CMD:
- DEBUG( 0, "Test frame received!\n");
- dev_kfree_skb( skb);
+ DEBUG(0,__FUNCTION__ "(), TEST_FRAME\n");
+ irlap_recv_test_frame(self, skb, &info, command);
break;
case UA_RSP:
DEBUG( 4, "UA rsp frame received!\n");
irlap_recv_ua_frame( self, skb, &info);
break;
case FRMR_RSP:
- DEBUG( 4, "FRMR_RSP recevied!\n");
irlap_recv_frmr_frame( self, skb, &info);
break;
case UI_FRAME:
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index c4f7c2b8d..d76661b6c 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irlmp.c
- * Version: 0.8
+ * Version: 0.9
* Description: IrDA Link Management Protocol (LMP) layer
- * Status: Experimental.
+ * Status: Stable.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 17 20:54:32 1997
- * Modified at: Sat Jan 16 22:13:20 1999
+ * Modified at: Fri Apr 23 09:13:24 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
@@ -30,6 +30,9 @@
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/random.h>
+#include <linux/irda.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
@@ -39,18 +42,28 @@
#include <net/irda/iriap.h>
#include <net/irda/irlmp.h>
#include <net/irda/irlmp_frame.h>
-#include <linux/kmod.h>
/* Master structure */
struct irlmp_cb *irlmp = NULL;
-int sysctl_discovery = 0;
+/* These can be altered by the sysctl interface */
+int sysctl_discovery = 0;
+int sysctl_discovery_slots = 6;
char sysctl_devname[65];
+char *lmp_reasons[] = {
+ "ERROR, NOT USED",
+ "LM_USER_REQUEST",
+ "LM_LAP_DISCONNECT",
+ "LM_CONNECT_FAILURE",
+ "LM_LAP_RESET",
+ "LM_INIT_DISCONNECT",
+ "ERROR, NOT USED",
+};
+
__u8 *irlmp_hint_to_service( __u8 *hint);
#ifdef CONFIG_PROC_FS
-int irlmp_proc_read( char *buf, char **start, off_t offset, int len,
- int unused);
+int irlmp_proc_read(char *buf, char **start, off_t offst, int len, int unused);
#endif
/*
@@ -61,8 +74,6 @@ int irlmp_proc_read( char *buf, char **start, off_t offset, int len,
*/
__initfunc(int irlmp_init(void))
{
- DEBUG( 4, "--> irlmp_init\n");
-
/* Initialize the irlmp structure. */
if ( irlmp == NULL) {
irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL);
@@ -73,19 +84,21 @@ __initfunc(int irlmp_init(void))
irlmp->magic = LMP_MAGIC;
- irlmp->registry = hashbin_new( HB_LOCAL);
- irlmp->links = hashbin_new( HB_LOCAL);
- irlmp->unconnected_lsaps = hashbin_new( HB_GLOBAL);
+ irlmp->clients = hashbin_new(HB_GLOBAL);
+ irlmp->services = hashbin_new(HB_GLOBAL);
+ irlmp->links = hashbin_new(HB_GLOBAL);
+ irlmp->unconnected_lsaps = hashbin_new(HB_GLOBAL);
+ irlmp->cachelog = hashbin_new(HB_GLOBAL);
irlmp->free_lsap_sel = 0x10; /* Servers use 0x00-0x0f */
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
irlmp->cache.valid = FALSE;
#endif
- strcpy( sysctl_devname, "Linux");
+ strcpy(sysctl_devname, "Linux");
/* Do discovery every 3 seconds */
- init_timer( &irlmp->discovery_timer);
- irlmp_start_discovery_timer( irlmp, 600);
+ init_timer(&irlmp->discovery_timer);
+ irlmp_start_discovery_timer(irlmp, 600);
return 0;
}
@@ -99,18 +112,19 @@ __initfunc(int irlmp_init(void))
void irlmp_cleanup(void)
{
/* Check for main structure */
- ASSERT( irlmp != NULL, return;);
- ASSERT( irlmp->magic == LMP_MAGIC, return;);
+ ASSERT(irlmp != NULL, return;);
+ ASSERT(irlmp->magic == LMP_MAGIC, return;);
- del_timer( &irlmp->discovery_timer);
+ del_timer(&irlmp->discovery_timer);
- /* FIXME, we need a special function to deallocate LAPs */
- hashbin_delete( irlmp->links, (FREE_FUNC) kfree);
- hashbin_delete( irlmp->unconnected_lsaps, (FREE_FUNC) kfree);
- hashbin_delete( irlmp->registry, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->links, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->unconnected_lsaps, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->clients, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->services, (FREE_FUNC) kfree);
+ hashbin_delete(irlmp->cachelog, (FREE_FUNC) kfree);
/* De-allocate main structure */
- kfree( irlmp);
+ kfree(irlmp);
irlmp = NULL;
}
@@ -120,20 +134,20 @@ void irlmp_cleanup(void)
* Register with IrLMP and create a local LSAP,
* returns handle to LSAP.
*/
-struct lsap_cb *irlmp_open_lsap( __u8 slsap_sel, struct notify_t *notify)
+struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, struct notify_t *notify)
{
struct lsap_cb *self;
- ASSERT( notify != NULL, return NULL;);
- ASSERT( irlmp != NULL, return NULL;);
- ASSERT( irlmp->magic == LMP_MAGIC, return NULL;);
+ ASSERT(notify != NULL, return NULL;);
+ ASSERT(irlmp != NULL, return NULL;);
+ ASSERT(irlmp->magic == LMP_MAGIC, return NULL;);
- DEBUG( 4, "irlmp_open_lsap(), slsap_sel=%02x\n", slsap_sel);
+ DEBUG(4, __FUNCTION__ "(), slsap_sel=%02x\n", slsap_sel);
/*
* Does the client care which Source LSAP selector it gets?
*/
- if ( slsap_sel == LSAP_ANY) {
+ if (slsap_sel == LSAP_ANY) {
/*
* Find unused LSAP
*/
@@ -145,40 +159,39 @@ struct lsap_cb *irlmp_open_lsap( __u8 slsap_sel, struct notify_t *notify)
* Client wants specific LSAP, so check if it's already
* in use
*/
- if ( irlmp_slsap_inuse( slsap_sel)) {
+ if (irlmp_slsap_inuse(slsap_sel)) {
return NULL;
}
- if ( slsap_sel > irlmp->free_lsap_sel)
- irlmp->free_lsap_sel = slsap_sel+1;
}
/*
* Allocate new instance of a LSAP connection
*/
- self = kmalloc( sizeof(struct lsap_cb), GFP_ATOMIC);
- if ( self == NULL) {
+ self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
+ if (self == NULL) {
printk( KERN_ERR "IrLMP: Can't allocate memory for "
"LSAP control block!\n");
return NULL;
}
- memset( self, 0, sizeof(struct lsap_cb));
+ memset(self, 0, sizeof(struct lsap_cb));
self->magic = LMP_LSAP_MAGIC;
self->slsap_sel = slsap_sel;
self->dlsap_sel = LSAP_ANY;
+ self->connected = FALSE;
- init_timer( &self->watchdog_timer);
+ init_timer(&self->watchdog_timer);
- ASSERT( notify->instance != NULL, return NULL;);
+ ASSERT(notify->instance != NULL, return NULL;);
self->notify = *notify;
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
/*
* Insert into queue of unconnected LSAPs
*/
- hashbin_insert( irlmp->unconnected_lsaps, (QUEUE *) self,
- self->slsap_sel, NULL);
+ hashbin_insert(irlmp->unconnected_lsaps, (QUEUE *) self, (int) self,
+ NULL);
return self;
}
@@ -186,70 +199,62 @@ struct lsap_cb *irlmp_open_lsap( __u8 slsap_sel, struct notify_t *notify)
/*
* Function irlmp_close_lsap (self)
*
- * Remove an instance of a LSAP
+ * Remove an instance of LSAP
*/
-static void __irlmp_close_lsap( struct lsap_cb *self)
+static void __irlmp_close_lsap(struct lsap_cb *self)
{
- DEBUG( 4, "irlmp_close()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
/*
* Set some of the variables to preset values
*/
self->magic = ~LMP_LSAP_MAGIC;
- del_timer( &self->watchdog_timer); /* Important! */
+ del_timer(&self->watchdog_timer); /* Important! */
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
- ASSERT( irlmp != NULL, return;);
+ ASSERT(irlmp != NULL, return;);
irlmp->cache.valid = FALSE;
#endif
- /*
- * Deallocate structure
- */
- kfree( self);
-
- DEBUG( 4, "irlmp_close() -->\n");
+ kfree(self);
}
/*
* Function irlmp_close_lsap (self)
*
- *
+ * Close and remove LSAP
*
*/
-void irlmp_close_lsap( struct lsap_cb *self)
+void irlmp_close_lsap(struct lsap_cb *self)
{
struct lap_cb *lap;
- struct lsap_cb *lsap;
+ struct lsap_cb *lsap = NULL;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
-
- lap = self->lap;
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
/*
* Find out if we should remove this LSAP from a link or from the
* list of unconnected lsaps (not associated with a link)
*/
- if ( lap == NULL) {
- lsap = hashbin_remove( irlmp->unconnected_lsaps,
- self->slsap_sel, NULL);
- } else {
- ASSERT( lap != NULL, return;);
- ASSERT( lap->magic == LMP_LAP_MAGIC, return;);
-
- lsap = hashbin_remove( lap->lsaps, self->slsap_sel, NULL);
+ lap = self->lap;
+ if (lap) {
+ ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
+ lsap = hashbin_remove(lap->lsaps, (int) self, NULL);
+ }
+ /* Check if we found the LSAP! If not then try the unconnected lsaps */
+ if (!lsap) {
+ lsap = hashbin_remove(irlmp->unconnected_lsaps, (int) self,
+ NULL);
}
- if ( lsap == NULL) {
- DEBUG( 0, __FUNCTION__
+ if (!lsap) {
+ DEBUG(0, __FUNCTION__
"(), Looks like somebody has removed me already!\n");
return;
}
- ASSERT( lsap == self, return;);
-
- __irlmp_close_lsap( self);
+ __irlmp_close_lsap(self);
}
/*
@@ -259,47 +264,47 @@ void irlmp_close_lsap( struct lsap_cb *self)
* instances of the IrLAP layer, each connected to different IrDA ports
*
*/
-void irlmp_register_irlap( struct irlap_cb *irlap, __u32 saddr,
- struct notify_t *notify)
+void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr,
+ struct notify_t *notify)
{
struct lap_cb *lap;
- DEBUG( 4, __FUNCTION__ "(), Registered IrLAP, saddr = %08x\n",
- saddr);
+ DEBUG(4, __FUNCTION__ "(), Registered IrLAP, saddr = %08x\n", saddr);
- ASSERT( irlmp != NULL, return;);
- ASSERT( irlmp->magic == LMP_MAGIC, return;);
- ASSERT( notify != NULL, return;);
+ ASSERT(irlmp != NULL, return;);
+ ASSERT(irlmp->magic == LMP_MAGIC, return;);
+ ASSERT(notify != NULL, return;);
/*
* Allocate new instance of a LSAP connection
*/
- lap = kmalloc( sizeof(struct lap_cb), GFP_KERNEL);
- if ( lap == NULL) {
- printk( KERN_ERR "IrLMP: Can't allocate memory for "
- "LAP control block!\n");
+ lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL);
+ if (lap == NULL) {
+ DEBUG(3, __FUNCTION__ "(), unable to kmalloc\n");
return;
}
- memset( lap, 0, sizeof(struct lap_cb));
+ memset(lap, 0, sizeof(struct lap_cb));
lap->irlap = irlap;
lap->magic = LMP_LAP_MAGIC;
lap->saddr = saddr;
- lap->lsaps = hashbin_new( HB_GLOBAL);
- lap->cachelog = hashbin_new( HB_LOCAL);
+ lap->daddr = DEV_ADDR_ANY;
+ lap->lsaps = hashbin_new(HB_GLOBAL);
- irlmp_next_lap_state( lap, LAP_STANDBY);
+ irlmp_next_lap_state(lap, LAP_STANDBY);
+ init_timer(&lap->idle_timer);
+
/*
* Insert into queue of unconnected LSAPs
*/
- hashbin_insert( irlmp->links, (QUEUE *) lap, lap->saddr, NULL);
+ hashbin_insert(irlmp->links, (QUEUE *) lap, lap->saddr, NULL);
/*
* We set only this variable so IrLAP can tell us on which link the
 * different events happened
*/
- irda_notify_init( notify);
+ irda_notify_init(notify);
notify->instance = lap;
}
@@ -309,36 +314,23 @@ void irlmp_register_irlap( struct irlap_cb *irlap, __u32 saddr,
* IrLAP layer has been removed!
*
*/
-void irlmp_unregister_irlap( __u32 saddr)
+void irlmp_unregister_link(__u32 saddr)
{
- struct lap_cb *self;
+ struct lap_cb *link;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- self = hashbin_remove( irlmp->links, saddr, NULL);
- if ( self != NULL) {
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
+ link = hashbin_remove(irlmp->links, saddr, NULL);
+ if (link) {
+ ASSERT(link->magic == LMP_LAP_MAGIC, return;);
- self->magic = ~LMP_LAP_MAGIC;
- kfree( self);
- } else {
- DEBUG( 0, "irlmp_unregister_irlap(), Didn't find LAP!\n");
- }
-}
-
-void dump_discoveries( hashbin_t *log)
-{
- DISCOVERY *d;
+ /* Remove all discoveries discovered at this link */
+ irlmp_expire_discoveries(irlmp->cachelog, link->saddr, TRUE);
- ASSERT( log != NULL, return;);
+ del_timer(&link->idle_timer);
- d = (DISCOVERY *) hashbin_get_first( log);
- while( d != NULL) {
- DEBUG( 0, "Discovery:\n");
- DEBUG( 0, " daddr=%08x\n", d->daddr);
- DEBUG( 0, " name=%s\n", d->info);
-
- d = (DISCOVERY *) hashbin_get_next( log);
+ link->magic = 0;
+ kfree(link);
}
}
@@ -348,100 +340,98 @@ void dump_discoveries( hashbin_t *log)
* Connect with a peer LSAP
*
*/
-void irlmp_connect_request( struct lsap_cb *self, __u8 dlsap_sel, __u32 daddr,
- struct qos_info *qos, struct sk_buff *userdata)
+int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *qos, struct sk_buff *userdata)
{
struct sk_buff *skb = NULL;
struct lap_cb *lap;
struct lsap_cb *lsap;
- unsigned long flags;
+ discovery_t *discovery;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT( self != NULL, return -1;);
+ ASSERT( self->magic == LMP_LSAP_MAGIC, return -1;);
- DEBUG( 4, "irlmp_connect_request(), "
- "slsap_sel=%02x, dlsap_sel=%02x, daddr=%08x\n",
- self->slsap_sel, dlsap_sel, daddr);
+ DEBUG(2, __FUNCTION__
+ "(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n",
+ self->slsap_sel, dlsap_sel, saddr, daddr);
+
+ if ( self->connected)
+ return -EISCONN;
- if ( self->connected) {
- DEBUG( 0, __FUNCTION__ "(), Error: already connected!!\n");
-
- return;
- }
+ /* Client must supply destination device address */
+ if (!daddr)
+ return -EINVAL;
/* Any userdata? */
- if ( userdata == NULL) {
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Could not allocate sk_buff of length %d\n",
- 64);
- return;
- }
- skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ if (userdata == NULL) {
+ skb = dev_alloc_skb(64);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, LMP_CONTROL_HEADER+LAP_HEADER);
} else
skb = userdata;
/* Make room for MUX control header ( 3 bytes) */
- ASSERT( skb_headroom( skb) >= LMP_CONTROL_HEADER, return;);
- skb_push( skb, LMP_CONTROL_HEADER);
+ ASSERT(skb_headroom(skb) >= LMP_CONTROL_HEADER, return -1;);
+ skb_push(skb, LMP_CONTROL_HEADER);
self->dlsap_sel = dlsap_sel;
self->tmp_skb = skb;
- /*
- * Find out which link to connect on, and make sure nothing strange
- * happens while we traverse the list
+ /*
+ * Find the link to where we should try to connect since there may
+ * be more than one IrDA port on this machine. If the client has
+ * passed us the saddr (and already knows which link to use), then
+ * we use that to find the link, if not then we have to look in the
+ * discovery log and check if any of the links has discovered a
+ * device with the given daddr
*/
- save_flags( flags);
- cli();
-
- lap = (struct lap_cb *) hashbin_get_first( irlmp->links);
- while ( lap != NULL) {
- ASSERT( lap->magic == LMP_LAP_MAGIC, return;);
- /* dump_discoveries( lap->cachelog); */
+ if (!saddr) {
+ discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
+ if (discovery)
+ saddr = discovery->saddr;
+ }
+ lap = hashbin_find(irlmp->links, saddr, NULL);
+ if (lap == NULL) {
+ DEBUG(1, __FUNCTION__ "(), Unable to find a usable link!\n");
+ return -EHOSTUNREACH;
+ }
- if ( hashbin_find( lap->cachelog, daddr, NULL)) {
- DEBUG( 4, "irlmp_connect_request() found link to connect on!\n");
- self->lap = lap;
- break;
- }
- lap = (struct lap_cb *) hashbin_get_next( irlmp->links);
+ if (lap->daddr == DEV_ADDR_ANY)
+ lap->daddr = daddr;
+ else if (lap->daddr != daddr) {
+ DEBUG(0, __FUNCTION__ "(), sorry, but link is busy!\n");
+ return -EBUSY;
}
- restore_flags(flags);
-
+
+ self->lap = lap;
+
/*
* Remove LSAP from list of unconnected LSAPs and insert it into the
- * list of connected LSAPs for the particular link */
- lsap = hashbin_remove( irlmp->unconnected_lsaps, self->slsap_sel,
- NULL);
-
- /* Check if we found a link to connect on */
- if ( self->lap == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Unable to find a usable link!\n");
- return;
- }
+ * list of connected LSAPs for the particular link
+ */
+ lsap = hashbin_remove(irlmp->unconnected_lsaps, (int) self, NULL);
- ASSERT( lsap != NULL, return;);
- ASSERT( lsap->magic == LMP_LSAP_MAGIC, return;);
- ASSERT( lsap->lap != NULL, return;);
- ASSERT( lsap->lap->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(lsap != NULL, return -1;);
+ ASSERT(lsap->magic == LMP_LSAP_MAGIC, return -1;);
+ ASSERT(lsap->lap != NULL, return -1;);
+ ASSERT(lsap->lap->magic == LMP_LAP_MAGIC, return -1;);
- hashbin_insert( self->lap->lsaps, (QUEUE *) self, self->slsap_sel,
- NULL);
+ hashbin_insert(self->lap->lsaps, (QUEUE *) self, (int) self, NULL);
self->connected = TRUE;
-
+
/*
* User supplied qos specifications?
*/
- if ( qos)
+ if (qos)
self->qos = *qos;
- DEBUG( 4, "*** Connecting SLSAP=%02x, DLSAP= %02x\n",
- self->slsap_sel, self->dlsap_sel);
-
- irlmp_do_lsap_event( self, LM_CONNECT_REQUEST, skb);
+ irlmp_do_lsap_event(self, LM_CONNECT_REQUEST, skb);
+
+ return 0;
}
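Since irlmp_connect_request() now returns an errno-style result instead of void, callers can act on the specific failure. The sketch below is a hypothetical caller, not code from this patch: demo_connect() is made up, only irlmp_connect_request() and its return values are taken from the function above, and the usual kernel headers (errno values, DEBUG) are assumed to be in scope.

/* Hypothetical caller; only irlmp_connect_request() and its return
 * codes come from the function above. */
static int demo_connect(struct lsap_cb *lsap, __u8 dlsap_sel, __u32 daddr,
			struct qos_info *qos)
{
	int ret;

	/* Passing saddr == 0 lets IrLMP pick the link from the discovery log */
	ret = irlmp_connect_request(lsap, dlsap_sel, 0, daddr, qos, NULL);
	switch (ret) {
	case -EISCONN:      /* this LSAP is already connected */
	case -EINVAL:       /* no destination device address given */
	case -EHOSTUNREACH: /* no link has discovered daddr */
	case -EBUSY:        /* the link is busy talking to another device */
		DEBUG(1, "demo_connect(), connect failed, ret=%d\n", ret);
		break;
	default:
		break;
	}
	return ret;
}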
/*
@@ -450,29 +440,28 @@ void irlmp_connect_request( struct lsap_cb *self, __u8 dlsap_sel, __u32 daddr,
 * Incoming connection
*
*/
-void irlmp_connect_indication( struct lsap_cb *self, struct sk_buff *skb)
+void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb)
{
int max_seg_size;
- DEBUG( 4, "irlmp_connect_indication()\n");
+ DEBUG(3, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
- ASSERT( self->lap != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
+ ASSERT(self->lap != NULL, return;);
self->qos = *self->lap->qos;
max_seg_size = self->lap->qos->data_size.value;
- DEBUG( 4, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
+ DEBUG(4, __FUNCTION__ "(), max_seg_size=%d\n", max_seg_size);
/* Hide LMP_CONTROL_HEADER header from layer above */
- skb_pull( skb, LMP_CONTROL_HEADER);
+ skb_pull(skb, LMP_CONTROL_HEADER);
- if ( self->notify.connect_indication)
- self->notify.connect_indication( self->notify.instance, self,
- &self->qos, max_seg_size,
- skb);
+ if (self->notify.connect_indication)
+ self->notify.connect_indication(self->notify.instance, self,
+ &self->qos, max_seg_size, skb);
}
/*
@@ -483,7 +472,7 @@ void irlmp_connect_indication( struct lsap_cb *self, struct sk_buff *skb)
*/
void irlmp_connect_response( struct lsap_cb *self, struct sk_buff *userdata)
{
- DEBUG( 4, "irlmp_connect_response()\n");
+ DEBUG(3, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
@@ -506,11 +495,11 @@ void irlmp_connect_response( struct lsap_cb *self, struct sk_buff *userdata)
*
 * LSAP connection confirmed by the peer device!
*/
-void irlmp_connect_confirm( struct lsap_cb *self, struct sk_buff *skb)
+void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
{
int max_seg_size;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(3, __FUNCTION__ "()\n");
ASSERT( skb != NULL, return;);
ASSERT( self != NULL, return;);
@@ -532,23 +521,63 @@ void irlmp_connect_confirm( struct lsap_cb *self, struct sk_buff *skb)
}
/*
+ * Function irlmp_dup (orig, instance)
+ *
+ * Duplicate LSAP, can be used by servers to confirm a connection on a
+ * new LSAP so it can keep listening on the old one.
+ *
+ */
+struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
+{
+ struct lsap_cb *new;
+
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ /* Only allowed to duplicate unconnected LSAP's */
+ if (!hashbin_find(irlmp->unconnected_lsaps, (int) orig, NULL)) {
+ DEBUG(0, __FUNCTION__ "(), unable to find LSAP\n");
+ return NULL;
+ }
+ new = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
+ if (!new) {
+ DEBUG(0, __FUNCTION__ "(), unable to kmalloc\n");
+ return NULL;
+ }
+ /* Dup */
+ memcpy(new, orig, sizeof(struct lsap_cb));
+ new->notify.instance = instance;
+
+ init_timer(&new->watchdog_timer);
+
+ hashbin_insert(irlmp->unconnected_lsaps, (QUEUE *) new, (int) new,
+ NULL);
+
+ /* Make sure that we invalidate the cache */
+#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
+ irlmp->cache.valid = FALSE;
+#endif /* CONFIG_IRDA_CACHE_LAST_LSAP */
+
+ return new;
+}
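A typical use of irlmp_dup() is in a server's connect-indication handler: accept the incoming connection on a duplicate so the original LSAP keeps listening. The handler below is hypothetical and its parameter list is only approximated from the indication call in irlmp_connect_indication(); only irlmp_dup() and irlmp_connect_response() are the interfaces defined in this file.

/* Hypothetical server callback; signature approximated, not part of this patch */
static void demo_connect_indication(void *instance, void *sap,
				    struct qos_info *qos, int max_seg_size,
				    struct sk_buff *skb)
{
	struct lsap_cb *listen_lsap = (struct lsap_cb *) sap;
	struct lsap_cb *conn_lsap;

	/* Accept on a duplicate so the original LSAP stays available for
	 * further incoming connections */
	conn_lsap = irlmp_dup(listen_lsap, instance);
	if (!conn_lsap)
		return;

	irlmp_connect_response(conn_lsap, skb);
}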
+
+/*
* Function irlmp_disconnect_request (handle, userdata)
*
* The service user is requesting disconnection, this will not remove the
* LSAP, but only mark it as disconnected
*/
-void irlmp_disconnect_request( struct lsap_cb *self, struct sk_buff *userdata)
+void irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
{
struct lsap_cb *lsap;
- DEBUG( 4, "irlmp_disconnect_request()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
/* Already disconnected? */
if ( !self->connected) {
- DEBUG( 0, __FUNCTION__ "(), already disconnected!\n");
+ DEBUG( 1, __FUNCTION__ "(), already disconnected!\n");
return;
}
@@ -571,14 +600,14 @@ void irlmp_disconnect_request( struct lsap_cb *self, struct sk_buff *userdata)
ASSERT( self->lap->magic == LMP_LAP_MAGIC, return;);
ASSERT( self->lap->lsaps != NULL, return;);
- lsap = hashbin_remove( self->lap->lsaps, self->slsap_sel, NULL);
+ lsap = hashbin_remove(self->lap->lsaps, (int) self, NULL);
ASSERT( lsap != NULL, return;);
ASSERT( lsap->magic == LMP_LSAP_MAGIC, return;);
ASSERT( lsap == self, return;);
- hashbin_insert( irlmp->unconnected_lsaps, (QUEUE *) self,
- self->slsap_sel, NULL);
+ hashbin_insert(irlmp->unconnected_lsaps, (QUEUE *) self, (int) self,
+ NULL);
/* Reset some values */
self->connected = FALSE;
@@ -596,37 +625,32 @@ void irlmp_disconnect_indication( struct lsap_cb *self, LM_REASON reason,
{
struct lsap_cb *lsap;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG( 1, __FUNCTION__ "(), reason=%s\n", lmp_reasons[reason]);
ASSERT( self != NULL, return;);
ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
ASSERT( self->connected == TRUE, return;);
- DEBUG( 4, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
+ DEBUG( 3, __FUNCTION__ "(), slsap_sel=%02x, dlsap_sel=%02x\n",
self->slsap_sel, self->dlsap_sel);
self->connected = FALSE;
self->dlsap_sel = LSAP_ANY;
/*
- * Remove assosiation betwen this LSAP and the kink it used
+ * Remove association between this LSAP and the link it used
*/
ASSERT( self->lap != NULL, return;);
ASSERT( self->lap->lsaps != NULL, return;);
- lsap = hashbin_remove( self->lap->lsaps, self->slsap_sel, NULL);
+ lsap = hashbin_remove( self->lap->lsaps, (int) self, NULL);
ASSERT( lsap != NULL, return;);
ASSERT( lsap == self, return;);
- hashbin_insert( irlmp->unconnected_lsaps, (QUEUE *) lsap,
- lsap->slsap_sel, NULL);
+ hashbin_insert(irlmp->unconnected_lsaps, (QUEUE *) lsap, (int) lsap,
+ NULL);
self->lap = NULL;
-
- /* FIXME: the reasons should be extracted somewhere else? */
- if ( userdata) {
- DEBUG( 4, __FUNCTION__ "(), reason=%02x\n", userdata->data[3]);
- }
/*
* Inform service user
@@ -638,28 +662,24 @@ void irlmp_disconnect_indication( struct lsap_cb *self, LM_REASON reason,
}
/*
- * Function irlmp_discovery_request (nslots)
+ * Function irlmp_do_discovery (nslots)
*
- * Do a discovery of devices in front of the computer
+ * Do some discovery on all links
*
*/
-void irlmp_discovery_request( int nslots)
+void irlmp_do_discovery(int nslots)
{
struct lap_cb *lap;
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( irlmp != NULL, return;);
-
- if ( !sysctl_discovery)
- return;
+ /* Make sure value is sane */
+ if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)) {
+ printk(KERN_WARNING __FUNCTION__
+ "(), invalid value for number of slots!\n");
+ nslots = sysctl_discovery_slots = 8;
+ }
- /*
- * Construct new discovery info to be used by IrLAP,
- * TODO: no need to do this every time!
- */
- irlmp->discovery_cmd.hint[0] = irlmp->hint[0];
- irlmp->discovery_cmd.hint[1] = irlmp->hint[1];
+ /* Construct new discovery info to be used by IrLAP, */
+ irlmp->discovery_cmd.hints.word = irlmp->hints.word;
/*
* Set character set for device name (we use ASCII), and
@@ -667,138 +687,164 @@ void irlmp_discovery_request( int nslots)
* end
*/
irlmp->discovery_cmd.charset = CS_ASCII;
-
- strncpy( irlmp->discovery_cmd.info, sysctl_devname, 31);
- irlmp->discovery_cmd.info_len = strlen( irlmp->discovery_cmd.info);
+ strncpy(irlmp->discovery_cmd.info, sysctl_devname, 31);
+ irlmp->discovery_cmd.info_len = strlen(irlmp->discovery_cmd.info);
+ irlmp->discovery_cmd.nslots = nslots;
/*
* Try to send discovery packets on all links
*/
- lap = ( struct lap_cb *) hashbin_get_first( irlmp->links);
- while ( lap != NULL) {
- ASSERT( lap->magic == LMP_LAP_MAGIC, return;);
-
- DEBUG( 4, "irlmp_discovery_request() sending request!\n");
- irlmp_do_lap_event( lap, LM_LAP_DISCOVERY_REQUEST, NULL);
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
- lap = ( struct lap_cb *) hashbin_get_next( irlmp->links);
+ if (lap->lap_state == LAP_STANDBY) {
+ /* Expire discoveries discovered on this link */
+ irlmp_expire_discoveries(irlmp->cachelog, lap->saddr,
+ FALSE);
+
+ /* Try to discover */
+ irlmp_do_lap_event(lap, LM_LAP_DISCOVERY_REQUEST,
+ NULL);
+ }
+ lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
}
}
/*
+ * Function irlmp_discovery_request (nslots)
+ *
+ * Do a discovery of devices in front of the computer
+ *
+ */
+void irlmp_discovery_request(int nslots)
+{
+ DEBUG(4, __FUNCTION__ "(), nslots=%d\n", nslots);
+
+ /* Check if user wants to override the default */
+ if (nslots == DISCOVERY_DEFAULT_SLOTS)
+ nslots = sysctl_discovery_slots;
+
+ /*
+ * If discovery is already running, then just return the current
+ * discovery log
+ */
+ if (sysctl_discovery) {
+ DEBUG(2, __FUNCTION__ "() discovery already running, so we"
+ " just return the old discovery log!\n");
+ irlmp_discovery_confirm(irlmp->cachelog);
+ } else
+ irlmp_do_discovery(nslots);
+}
+
+#if 0
+/*
* Function irlmp_check_services (discovery)
*
*
*
*/
-void irlmp_check_services( DISCOVERY *discovery)
+void irlmp_check_services(discovery_t *discovery)
{
- struct irlmp_registration *entry;
+ struct irlmp_client *client;
struct irmanager_event event;
- __u8 *service;
+ __u8 *service_log;
+ __u8 service;
int i = 0;
- printk( KERN_INFO "IrDA Discovered: %s\n", discovery->info);
- printk( KERN_INFO " Services: ");
+ DEBUG(1, "IrDA Discovered: %s\n", discovery->info);
+ DEBUG(1, " Services: ");
- service = irlmp_hint_to_service( discovery->hint);
- if (service != NULL) {
- /*
- * Check all services on the device
- */
- while ( service[i] != S_END) {
- DEBUG( 4, "service=%02x\n", service[i]);
- entry = hashbin_find( irlmp->registry,
- service[i], NULL);
- if ( entry && entry->discovery_callback) {
- DEBUG( 4, "discovery_callback!\n");
- entry->discovery_callback( discovery);
- } else {
- /*
- * Found no clients for dealing with this
- * service, so ask the user space irmanager
- * to try to load the right module for us
- */
-
- event.event = EVENT_DEVICE_DISCOVERED;
- event.service = service[i];
- event.daddr = discovery->daddr;
- sprintf( event.info, "%s",
- discovery->info);
- irmanager_notify( &event);
- }
- i++; /* Next service */
+ service_log = irlmp_hint_to_service(discovery->hints.byte);
+ if (!service_log)
+ return;
+
+ /*
+ * Check all services on the device
+ */
+ while ((service = service_log[i++]) != S_END) {
+ DEBUG( 4, "service=%02x\n", service);
+ client = hashbin_find(irlmp->registry, service, NULL);
+ if (client && client->discovery_callback) {
+ DEBUG( 4, "discovery_callback!\n");
+
+ client->discovery_callback(discovery);
+ } else {
+ /* Don't notify about the ANY service */
+ if (service == S_ANY)
+ continue;
+ /*
+ * Found no clients for dealing with this service,
+ * so ask the user space irmanager to try to load
+ * the right module for us
+ */
+ event.event = EVENT_DEVICE_DISCOVERED;
+ event.service = service;
+ event.daddr = discovery->daddr;
+ sprintf(event.info, "%s", discovery->info);
+ irmanager_notify(&event);
}
- kfree( service);
}
+ kfree(service_log);
}
-
+#endif
/*
- * Function irlmp_discovery_confirm ( self, log)
+ * Function irlmp_notify_client (log)
+ *
+ * Notify all about discovered devices
*
- * Some device(s) answered to our discovery request! Check to see which
- * device it is, and give indication to the client(s)
- *
*/
-void irlmp_discovery_confirm( struct lap_cb *self, hashbin_t *log)
+void irlmp_notify_client(irlmp_client_t *client, hashbin_t *log)
{
- DISCOVERY *discovery;
+ discovery_t *discovery;
+
+ DEBUG(3, __FUNCTION__ "()\n");
- DEBUG( 4, __FUNCTION__ "()\n");
+ /* Check if client wants the whole log */
+ if (client->callback2)
+ client->callback2(log);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
-
- /*
- * Now, check all discovered devices (if any)
+ /*
+ * Now, check all discovered devices (if any), and notify client
+ * only about the services that the client is interested in
*/
- discovery = ( DISCOVERY *) hashbin_get_first( log);
- while ( discovery != NULL) {
- self->daddr = discovery->daddr;
-
- DEBUG( 4, "discovery->daddr = 0x%08x\n", discovery->daddr);
+ discovery = (discovery_t *) hashbin_get_first(log);
+ while (discovery != NULL) {
+ DEBUG(3, "discovery->daddr = 0x%08x\n", discovery->daddr);
- irlmp_check_services( discovery);
-
- discovery = ( DISCOVERY *) hashbin_get_next( log);
+ if (client->hint_mask & discovery->hints.word) {
+ if (client->callback1)
+ client->callback1(discovery);
+ }
+ discovery = (discovery_t *) hashbin_get_next(log);
}
}
/*
- * Function irlmp_discovery_indication (discovery)
- *
- * A remote device is discovering us!
+ * Function irlmp_discovery_confirm ( self, log)
*
+ * Some device(s) answered to our discovery request! Check to see which
+ * device it is, and give indication to the client(s)
+ *
*/
-void irlmp_discovery_indication( struct lap_cb *self, DISCOVERY *discovery)
+void irlmp_discovery_confirm(hashbin_t *log)
{
- /* struct irda_event event; */
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
- ASSERT( discovery != NULL, return;);
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- DEBUG( 4, "discovery->daddr = 0x%08x\n", discovery->daddr);
- self->daddr = discovery->daddr;
-
- /*
- * Create a new discovery log if neccessary
- */
- /* if ( self->cachelog == NULL) */
-/* self->cachelog = hashbin_new( HB_LOCAL); */
- ASSERT( self->cachelog != NULL, return;);
-
- /*
- * Insert this discovery device into the discovery_log if its
- * not there already
- */
- if ( !hashbin_find( self->cachelog, discovery->daddr, NULL))
- hashbin_insert( self->cachelog, (QUEUE *) discovery,
- discovery->daddr, NULL);
-
- irlmp_check_services( discovery);
+ irlmp_client_t *client;
+
+ DEBUG(3, __FUNCTION__ "()\n");
+
+ ASSERT(log != NULL, return;);
+
+ if (!hashbin_get_size(log))
+ return;
+
+ client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
+ while (client != NULL) {
+ /* Check if we should notify client */
+ irlmp_notify_client(client, log);
+
+ client = (irlmp_client_t *) hashbin_get_next(irlmp->clients);
+ }
}
/*
@@ -807,14 +853,13 @@ void irlmp_discovery_indication( struct lap_cb *self, DISCOVERY *discovery)
 * Used by IrLAP to get the discovery info it needs when answering
* discovery requests by other devices.
*/
-DISCOVERY *irlmp_get_discovery_response()
+discovery_t *irlmp_get_discovery_response()
{
- DEBUG( 4, "irlmp_get_discovery_response()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( irlmp != NULL, return NULL;);
+ ASSERT(irlmp != NULL, return NULL;);
- irlmp->discovery_rsp.hint[0] = irlmp->hint[0];
- irlmp->discovery_rsp.hint[1] = irlmp->hint[1];
+ irlmp->discovery_rsp.hints.word = irlmp->hints.word;
/*
* Set character set for device name (we use ASCII), and
@@ -823,8 +868,8 @@ DISCOVERY *irlmp_get_discovery_response()
*/
irlmp->discovery_rsp.charset = CS_ASCII;
- strncpy( irlmp->discovery_rsp.info, sysctl_devname, 31);
- irlmp->discovery_rsp.info_len = strlen( irlmp->discovery_rsp.info) + 2;
+ strncpy(irlmp->discovery_rsp.info, sysctl_devname, 31);
+ irlmp->discovery_rsp.info_len = strlen(irlmp->discovery_rsp.info) + 2;
return &irlmp->discovery_rsp;
}
@@ -835,19 +880,17 @@ DISCOVERY *irlmp_get_discovery_response()
* Send some data to peer device
*
*/
-void irlmp_data_request( struct lsap_cb *self, struct sk_buff *skb)
+void irlmp_data_request(struct lsap_cb *self, struct sk_buff *skb)
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( skb != NULL, return;);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
/* Make room for MUX header */
- ASSERT( skb_headroom( skb) >= LMP_HEADER, return;);
- skb_push( skb, LMP_HEADER);
+ ASSERT(skb_headroom( skb) >= LMP_HEADER, return;);
+ skb_push(skb, LMP_HEADER);
- irlmp_do_lsap_event( self, LM_DATA_REQUEST, skb);
+ irlmp_do_lsap_event(self, LM_DATA_REQUEST, skb);
}
/*
@@ -856,18 +899,12 @@ void irlmp_data_request( struct lsap_cb *self, struct sk_buff *skb)
* Got data from LAP layer so pass it up to upper layer
*
*/
-void irlmp_data_indication( struct lsap_cb *self, struct sk_buff *skb)
+inline void irlmp_data_indication(struct lsap_cb *self, struct sk_buff *skb)
{
- DEBUG( 4, "irlmp_data_indication()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
/* Hide LMP header from layer above */
- skb_pull( skb, LMP_HEADER);
+ skb_pull(skb, LMP_HEADER);
- if ( self->notify.data_indication)
+ if (self->notify.data_indication)
self->notify.data_indication(self->notify.instance, self, skb);
}
@@ -877,13 +914,11 @@ void irlmp_data_indication( struct lsap_cb *self, struct sk_buff *skb)
*
*
*/
-void irlmp_udata_request( struct lsap_cb *self, struct sk_buff *skb)
+inline void irlmp_udata_request( struct lsap_cb *self, struct sk_buff *skb)
{
DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( skb != NULL, return;);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
/* Make room for MUX header */
ASSERT( skb_headroom( skb) >= LMP_HEADER, return;);
@@ -909,9 +944,8 @@ void irlmp_udata_indication( struct lsap_cb *self, struct sk_buff *skb)
/* Hide LMP header from layer above */
skb_pull( skb, LMP_HEADER);
- if ( self->notify.udata_indication)
- self->notify.udata_indication( self->notify.instance, self,
- skb);
+ if (self->notify.udata_indication)
+ self->notify.udata_indication(self->notify.instance, self, skb);
}
/*
@@ -922,7 +956,7 @@ void irlmp_udata_indication( struct lsap_cb *self, struct sk_buff *skb)
*/
void irlmp_connectionless_data_request( struct sk_buff *skb)
{
- DEBUG( 0, __FUNCTION__ "(), Sorry not implemented\n");
+ DEBUG( 1, __FUNCTION__ "(), Sorry not implemented\n");
}
/*
@@ -933,12 +967,12 @@ void irlmp_connectionless_data_request( struct sk_buff *skb)
*/
void irlmp_connectionless_data_indication( struct sk_buff *skb)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG( 1, __FUNCTION__ "()\n");
}
void irlmp_status_request(void)
{
- DEBUG( 0, "irlmp_status_request(), Not implemented\n");
+ DEBUG( 1, "irlmp_status_request(), Not implemented\n");
}
void irlmp_status_indication( LINK_STATUS link, LOCK_STATUS lock)
@@ -952,38 +986,41 @@ void irlmp_status_indication( LINK_STATUS link, LOCK_STATUS lock)
 * Returns a list of all services contained in the given hint bits. This
 * function assumes that the hint bits are only two bytes in size
*/
-__u8 *irlmp_hint_to_service( __u8 *hint)
+__u8 *irlmp_hint_to_service(__u8 *hint)
{
__u8 *service;
int i = 0;
- /* Allocate array to store services in */
+ /*
+ * Allocate array to store services in. 16 entries should be safe
+ * since we currently only support 2 hint bytes
+ */
service = kmalloc( 16, GFP_ATOMIC);
if ( !service) {
- DEBUG( 0, "irlmp_hint_to_service: Unable to kmalloc!\n");
+ DEBUG(1, __FUNCTION__ "(), Unable to kmalloc!\n");
return NULL;
}
if ( !hint[0]) {
- printk( "<None>\n");
+ DEBUG(1, "<None>\n");
return NULL;
}
if ( hint[0] & HINT_PNP)
- printk( "PnP Compatible ");
+ DEBUG(1, "PnP Compatible ");
if ( hint[0] & HINT_PDA)
- printk( "PDA/Palmtop ");
+ DEBUG(1, "PDA/Palmtop ");
if ( hint[0] & HINT_COMPUTER)
- printk( "Computer ");
+ DEBUG(1, "Computer ");
if ( hint[0] & HINT_PRINTER) {
- printk( "Printer\n");
+ DEBUG(1, "Printer ");
service[i++] = S_PRINTER;
}
if ( hint[0] & HINT_MODEM)
- printk( "Modem ");
+ DEBUG(1, "Modem ");
if ( hint[0] & HINT_FAX)
- printk( "Fax ");
+ DEBUG(1, "Fax ");
if ( hint[0] & HINT_LAN) {
- printk( "LAN Access\n");
+ DEBUG(1, "LAN Access ");
service[i++] = S_LAN;
}
/*
@@ -992,22 +1029,25 @@ __u8 *irlmp_hint_to_service( __u8 *hint)
* (IrLMP p. 29)
*/
if ( hint[0] & HINT_EXTENSION) {
- if ( hint[1] & HINT_TELEPHONY)
- printk( "Telephony ");
-
- if ( hint[1] & HINT_FILE_SERVER)
- printk( "File Server ");
+ if ( hint[1] & HINT_TELEPHONY) {
+ DEBUG(1, "Telephony ");
+ service[i++] = S_TELEPHONY;
+ } if ( hint[1] & HINT_FILE_SERVER)
+ DEBUG(1, "File Server ");
if ( hint[1] & HINT_COMM) {
- printk( "IrCOMM ");
+ DEBUG(1, "IrCOMM ");
service[i++] = S_COMM;
}
if ( hint[1] & HINT_OBEX) {
- printk( "IrOBEX ");
+ DEBUG(1, "IrOBEX ");
service[i++] = S_OBEX;
}
}
- printk( "\n");
+ DEBUG(1, "\n");
+
+ /* So that client can be notified about any discovery */
+ service[i++] = S_ANY;
service[i] = S_END;
@@ -1015,139 +1055,219 @@ __u8 *irlmp_hint_to_service( __u8 *hint)
}
/*
- * Function irlmp_service_to_hint (service, hint)
+ * Function irlmp_service_to_hint (service)
*
- *
+ * Converts a service type to a hint bit
*
+ * Returns: a 16 bit hint value, with the service bit set
*/
-void irlmp_service_to_hint( int service, __u8 *hint)
+__u16 irlmp_service_to_hint(int service)
{
+ __u16_host_order hint;
+
+ hint.word = 0;
+
switch (service) {
case S_PNP:
- hint[0] |= HINT_PNP;
+ hint.byte[0] |= HINT_PNP;
break;
case S_PDA:
- hint[0] |= HINT_PDA;
+ hint.byte[0] |= HINT_PDA;
break;
case S_COMPUTER:
- hint[0] |= HINT_COMPUTER;
+ hint.byte[0] |= HINT_COMPUTER;
break;
case S_PRINTER:
- hint[0] |= HINT_PRINTER;
+ hint.byte[0] |= HINT_PRINTER;
break;
case S_MODEM:
- hint[0] |= HINT_PRINTER;
+ hint.byte[0] |= HINT_PRINTER;
break;
case S_LAN:
- hint[0] |= HINT_LAN;
+ hint.byte[0] |= HINT_LAN;
break;
case S_COMM:
- hint[0] |= HINT_EXTENSION;
- hint[1] |= HINT_COMM;
+ hint.byte[0] |= HINT_EXTENSION;
+ hint.byte[1] |= HINT_COMM;
break;
case S_OBEX:
- hint[0] |= HINT_EXTENSION;
- hint[1] |= HINT_OBEX;
+ hint.byte[0] |= HINT_EXTENSION;
+ hint.byte[1] |= HINT_OBEX;
+ break;
+ case S_ANY:
+ hint.word = 0xffff;
break;
default:
- DEBUG( 0, "irlmp_service_to_hint(), Unknown service!\n");
+ DEBUG( 1, __FUNCTION__ "(), Unknown service!\n");
break;
}
+ return hint.word;
}
/*
- * Function irlmp_register (service, type, callback)
+ * Function irlmp_register_service (service)
*
- * Register a local client or server with IrLMP
+ * Register local service with IrLMP
*
*/
-void irlmp_register_layer( int service, int type, int do_discovery,
- DISCOVERY_CALLBACK callback)
+__u32 irlmp_register_service(__u16 hints)
{
- struct irlmp_registration *entry;
+ irlmp_service_t *service;
+ __u32 handle;
- sysctl_discovery |= do_discovery;
+ DEBUG(4, __FUNCTION__ "(), hints = %04x\n", hints);
- if ( type & SERVER)
- irlmp_service_to_hint( service, irlmp->hint);
+ /* Get a unique handle for this service */
+ get_random_bytes(&handle, sizeof(handle));
+ while (hashbin_find(irlmp->services, handle, NULL) || !handle)
+ get_random_bytes(&handle, sizeof(handle));
- /* Check if this service has been registred before */
- entry = hashbin_find( irlmp->registry, service, NULL);
- if ( entry != NULL) {
- /* Update type in entry */
- entry->type |= type;
-
- /* Update callback only if client, since servers don't
- * use callbacks, and we don't want to overwrite a
- * previous registred client callback
- */
- if ( type & CLIENT)
- entry->discovery_callback = callback;
- return;
- }
+ irlmp->hints.word |= hints;
/* Make a new registration */
- entry = kmalloc( sizeof( struct irlmp_registration), GFP_ATOMIC);
- if ( !entry) {
- DEBUG( 0, "irlmp_register(), Unable to kmalloc!\n");
- return;
+ service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC);
+ if (!service) {
+ DEBUG(1, __FUNCTION__ "(), Unable to kmalloc!\n");
+ return 0;
}
+ service->hints = hints;
+ hashbin_insert(irlmp->services, (QUEUE*) service, handle, NULL);
- entry->service = service;
- entry->type = type;
- entry->discovery_callback = callback;
-
- hashbin_insert( irlmp->registry, (QUEUE*) entry, entry->service, NULL);
+ return handle;
}
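Together with irlmp_service_to_hint(), registering a service is now a two step affair: build the hint word, then hand it to irlmp_register_service() and keep the returned handle for later unregistration. The fragment below is a hedged sketch of that sequence; demo_skey and demo_advertise_lan() are made-up names, the two IrLMP calls are the ones defined above.

/* Hypothetical service provider; only the two IrLMP calls are real */
static __u32 demo_skey;

static int demo_advertise_lan(void)
{
	__u16 hints;

	/* Build the 16 bit hint word for the service we provide */
	hints = irlmp_service_to_hint(S_LAN);

	/* Ask IrLMP to advertise these hints in discovery frames */
	demo_skey = irlmp_register_service(hints);
	if (!demo_skey)
		return -1;

	return 0;
}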
/*
- * Function irlmp_unregister (serivice)
+ * Function irlmp_unregister_service (handle)
*
- *
+ * Unregister service with IrLMP.
*
+ * Returns: 0 on success, -1 on error
*/
-void irlmp_unregister_layer( int service, int type)
+int irlmp_unregister_service(__u32 handle)
{
- struct irlmp_registration *entry;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- entry = hashbin_find( irlmp->registry, service, NULL);
- if ( entry != NULL) {
- DEBUG( 4, "Found entry to change or remove!\n");
- /* Remove this type from the service registration */
- entry->type &= ~type;
- }
+ irlmp_service_t *service;
+
+ DEBUG(4, __FUNCTION__ "()\n");
- if ( !entry) {
- DEBUG( 0, "Unable to find entry to unregister!\n");
- return;
+ if (!handle)
+ return -1;
+
+ service = hashbin_find(irlmp->services, handle, NULL);
+ if (!service) {
+ DEBUG(1, __FUNCTION__ "(), Unknown service!\n");
+ return -1;
}
- /*
- * Remove entry if there is no more client and server support
- * left in entry
- */
- if ( !entry->type) {
- DEBUG( 4, __FUNCTION__ "(), removing entry!\n");
- entry = hashbin_remove( irlmp->registry, service, NULL);
- if ( entry != NULL)
- kfree( entry);
- }
+ service = hashbin_remove(irlmp->services, handle, NULL);
+ if (service)
+ kfree(service);
/* Remove old hint bits */
- irlmp->hint[0] = 0;
- irlmp->hint[1] = 0;
+ irlmp->hints.word = 0;
/* Refresh current hint bits */
- entry = (struct irlmp_registration *) hashbin_get_first( irlmp->registry);
- while( entry != NULL) {
- if ( entry->type & SERVER)
- irlmp_service_to_hint( entry->service,
- irlmp->hint);
- entry = (struct irlmp_registration *)
- hashbin_get_next( irlmp->registry);
+ service = (irlmp_service_t *) hashbin_get_first(irlmp->services);
+ while (service) {
+ irlmp->hints.word |= service->hints;
+
+ service = (irlmp_service_t *)hashbin_get_next(irlmp->services);
}
+ return 0;
+}
+
+/*
+ * Function irlmp_register_client (hint_mask, callback1, callback2)
+ *
+ * Register a local client with IrLMP
+ *
+ * Returns: handle > 0 on success, 0 on error
+ */
+__u32 irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 callback1,
+ DISCOVERY_CALLBACK2 callback2)
+{
+ irlmp_client_t *client;
+ __u32 handle;
+
+ /* Get a unique handle for this client */
+ get_random_bytes(&handle, sizeof(handle));
+ while (hashbin_find(irlmp->clients, handle, NULL) || !handle)
+ get_random_bytes(&handle, sizeof(handle));
+
+ /* Make a new registration */
+ client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
+ if (!client) {
+ DEBUG( 1, __FUNCTION__ "(), Unable to kmalloc!\n");
+
+ return 0;
+ }
+
+ /* Register the details */
+ client->hint_mask = hint_mask;
+ client->callback1 = callback1;
+ client->callback2 = callback2;
+
+ hashbin_insert(irlmp->clients, (QUEUE *) client, handle, NULL);
+
+ return handle;
+}
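A discovery client mirrors the service side: it registers a hint mask plus callbacks and gets a handle back. The sketch below is hypothetical; demo_discovery_indication() and demo_watch_for_lan() are invented, and it assumes (from the notify loop earlier in this file) that a DISCOVERY_CALLBACK1 takes a single discovery_t pointer.

/* Hypothetical discovery client; only the IrLMP calls are real */
static __u32 demo_ckey;

static void demo_discovery_indication(discovery_t *discovery)
{
	DEBUG(1, "demo: discovered daddr=%08x, name=%s\n",
	      discovery->daddr, discovery->info);
}

static int demo_watch_for_lan(void)
{
	__u16 mask = irlmp_service_to_hint(S_LAN);

	demo_ckey = irlmp_register_client(mask, demo_discovery_indication,
					  NULL);
	return demo_ckey ? 0 : -1;
}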
+
+/*
+ * Function irlmp_update_client (handle, hint_mask, callback1, callback2)
+ *
+ * Updates specified client (handle) with possibly new hint_mask and
+ * callback
+ *
+ * Returns: 0 on success, -1 on error
+ */
+int irlmp_update_client(__u32 handle, __u16 hint_mask,
+ DISCOVERY_CALLBACK1 callback1,
+ DISCOVERY_CALLBACK2 callback2)
+{
+ irlmp_client_t *client;
+
+ if (!handle)
+ return -1;
+
+ client = hashbin_find(irlmp->clients, handle, NULL);
+ if (!client) {
+ DEBUG(1, __FUNCTION__ "(), Unknown client!\n");
+ return -1;
+ }
+
+ client->hint_mask = hint_mask;
+ client->callback1 = callback1;
+ client->callback2 = callback2;
+
+ return 0;
+}
+
+/*
+ * Function irlmp_unregister_client (handle)
+ *
+ * Returns: 0 on success, -1 on error
+ *
+ */
+int irlmp_unregister_client(__u32 handle)
+{
+ struct irlmp_client *client;
+
+ DEBUG(4, __FUNCTION__ "()\n");
+
+ if (!handle)
+ return -1;
+
+ client = hashbin_find(irlmp->clients, handle, NULL);
+ if (!client) {
+ DEBUG(1, __FUNCTION__ "(), Unknown client!\n");
+ return -1;
+ }
+
+ DEBUG( 4, __FUNCTION__ "(), removing client!\n");
+ client = hashbin_remove( irlmp->clients, handle, NULL);
+ if (client)
+ kfree(client);
+
+ return 0;
}
/*
@@ -1164,22 +1284,26 @@ int irlmp_slsap_inuse( __u8 slsap_sel)
ASSERT( irlmp->magic == LMP_MAGIC, return TRUE;);
ASSERT( slsap_sel != LSAP_ANY, return TRUE;);
- DEBUG( 4, "irlmp_slsap_inuse()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
+
+ /* Valid values are between 0 and 127 */
+ if (slsap_sel > 127)
+ return TRUE;
/*
* Check if slsap is already in use. To do this we have to loop over
 * every IrLAP connection and check every LSAP associated with each
 * connection.
*/
- lap = ( struct lap_cb *) hashbin_get_first( irlmp->links);
- while ( lap != NULL) {
- ASSERT( lap->magic == LMP_LAP_MAGIC, return TRUE;);
+ lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
+ while (lap != NULL) {
+ ASSERT(lap->magic == LMP_LAP_MAGIC, return TRUE;);
- self = (struct lsap_cb *) hashbin_get_first( lap->lsaps);
- while ( self != NULL) {
- ASSERT( self->magic == LMP_LSAP_MAGIC, return TRUE;);
+ self = (struct lsap_cb *) hashbin_get_first(lap->lsaps);
+ while (self != NULL) {
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return TRUE;);
- if (( self->slsap_sel == slsap_sel))/* && */
+ if ((self->slsap_sel == slsap_sel))/* && */
/* ( self->dlsap_sel == LSAP_ANY)) */
{
DEBUG( 4, "Source LSAP selector=%02x in use\n",
@@ -1202,15 +1326,28 @@ int irlmp_slsap_inuse( __u8 slsap_sel)
__u8 irlmp_find_free_slsap(void)
{
__u8 lsap_sel;
+ int wrapped = 0;
- ASSERT( irlmp != NULL, return -1;);
- ASSERT( irlmp->magic == LMP_MAGIC, return -1;);
+ ASSERT(irlmp != NULL, return -1;);
+ ASSERT(irlmp->magic == LMP_MAGIC, return -1;);
lsap_sel = irlmp->free_lsap_sel++;
+
+ /* Check if the new free lsap is really free */
+ while (irlmp_slsap_inuse(irlmp->free_lsap_sel)) {
+ irlmp->free_lsap_sel++;
- DEBUG( 4, "irlmp_find_free_slsap(), picked next free lsap_sel=%02x\n",
- lsap_sel);
+ /* Check if we need to wraparound */
+ if (irlmp->free_lsap_sel > 127) {
+ irlmp->free_lsap_sel = 10;
+ /* Make sure we terminate the loop */
+ if (wrapped++)
+ return 0;
+ }
+ }
+ DEBUG(4, __FUNCTION__ "(), next free lsap_sel=%02x\n", lsap_sel);
+
return lsap_sel;
}
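The wrap-around in irlmp_find_free_slsap() is easier to see in isolation. Below is a deliberately simplified, user-space model of the same scan, not a copy of the routine above: advance through the selector space, wrap back to 10 after 127, and bail out once a full wrap has already happened so the loop always terminates. The in_use() predicate is a stand-in for irlmp_slsap_inuse().

#include <stdio.h>

/* Stand-in for irlmp_slsap_inuse(); pretend everything below 0x20 is taken */
static int in_use(unsigned char sel)
{
	return sel < 0x20;
}

static unsigned char next_free = 0x10; /* servers use 0x00-0x0f */

static unsigned char find_free_slsap(void)
{
	unsigned char sel = next_free;
	int wrapped = 0;

	while (in_use(sel)) {
		if (++sel > 127) {
			sel = 10;
			if (wrapped++) /* already wrapped once: give up */
				return 0;
		}
	}
	next_free = sel + 1;
	return sel;
}

int main(void)
{
	printf("picked selector %#x\n", find_free_slsap());
	return 0;
}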
@@ -1227,21 +1364,25 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
switch (lap_reason) {
case LAP_DISC_INDICATION: /* Received a disconnect request from peer */
+ DEBUG( 1, __FUNCTION__ "(), LAP_DISC_INDICATION\n");
reason = LM_USER_REQUEST;
break;
 case LAP_NO_RESPONSE: /* Too many retransmits without response */
+ DEBUG( 1, __FUNCTION__ "(), LAP_NO_RESPONSE\n");
reason = LM_LAP_DISCONNECT;
break;
case LAP_RESET_INDICATION:
+ DEBUG( 1, __FUNCTION__ "(), LAP_RESET_INDICATION\n");
reason = LM_LAP_RESET;
break;
case LAP_FOUND_NONE:
case LAP_MEDIA_BUSY:
case LAP_PRIMARY_CONFLICT:
+ DEBUG( 1, __FUNCTION__ "(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n");
reason = LM_CONNECT_FAILURE;
break;
default:
- DEBUG( 0, __FUNCTION__
+ DEBUG( 1, __FUNCTION__
"(), Unknow IrLAP disconnect reason %d!\n", lap_reason);
reason = LM_LAP_DISCONNECT;
break;
@@ -1250,6 +1391,26 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason)
return reason;
}
+__u32 irlmp_get_saddr(struct lsap_cb *self)
+{
+ DEBUG(3, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return 0;);
+ ASSERT(self->lap != NULL, return 0;);
+
+ return self->lap->saddr;
+}
+
+__u32 irlmp_get_daddr(struct lsap_cb *self)
+{
+ DEBUG(3, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return 0;);
+ ASSERT(self->lap != NULL, return 0;);
+
+ return self->lap->daddr;
+}
+
#ifdef CONFIG_PROC_FS
/*
* Function irlmp_proc_read (buf, start, offset, len, unused)
@@ -1295,8 +1456,7 @@ int irlmp_proc_read( char *buf, char **start, off_t offset, int len,
len += sprintf( buf+len, "lap state: %s, ",
irlmp_state[ lap->lap_state]);
- len += sprintf( buf+len,
- "saddr: %#08x, daddr: %#08x, ",
+ len += sprintf( buf+len, "saddr: %#08x, daddr: %#08x, ",
lap->saddr, lap->daddr);
len += sprintf( buf+len, "\n");
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index a1f537958..20a2dff35 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irlmp_event.c
- * Version: 0.1
+ * Version: 0.8
* Description: An IrDA LMP event driver for Linux
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sat Jan 16 22:22:29 1999
+ * Modified at: Fri Apr 23 08:57:23 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
@@ -32,13 +32,13 @@
#include <net/irda/irlmp_frame.h>
#include <net/irda/irlmp_event.h>
-char *irlmp_state[] = {
+const char *irlmp_state[] = {
"LAP_STANDBY",
"LAP_U_CONNECT",
"LAP_ACTIVE",
};
-char *irlsap_state[] = {
+const char *irlsap_state[] = {
"LSAP_DISCONNECTED",
"LSAP_CONNECT",
"LSAP_CONNECT_PEND",
@@ -47,7 +47,7 @@ char *irlsap_state[] = {
"LSAP_SETUP_PEND",
};
-static char *irlmp_event[] = {
+static const char *irlmp_event[] = {
"LM_CONNECT_REQUEST",
"LM_CONNECT_CONFIRM",
"LM_CONNECT_RESPONSE",
@@ -71,6 +71,7 @@ static char *irlmp_event[] = {
"LM_LAP_DISCONNECT_REQUEST",
"LM_LAP_DISCOVERY_REQUEST",
"LM_LAP_DISCOVERY_CONFIRM",
+ "LM_LAP_IDLE_TIMEOUT",
};
/* LAP Connection control proto declarations */
@@ -113,16 +114,16 @@ static void (*lsap_state[])( struct lsap_cb *, IRLMP_EVENT, struct sk_buff *) =
};
/* Do connection control events */
-void irlmp_do_lsap_event( struct lsap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb)
+void irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
- DEBUG( 4, __FUNCTION__ "(), EVENT = %s, STATE = %s\n",
- irlmp_event[ event], irlmp_state[ self->lsap_state]);
+ DEBUG(4, __FUNCTION__ "(), EVENT = %s, STATE = %s\n",
+ irlmp_event[ event], irlmp_state[ self->lsap_state]);
- (*lsap_state[ self->lsap_state]) ( self, event, skb);
+ (*lsap_state[self->lsap_state]) (self, event, skb);
}
/*
@@ -131,41 +132,52 @@ void irlmp_do_lsap_event( struct lsap_cb *self, IRLMP_EVENT event,
* Do IrLAP control events
*
*/
-void irlmp_do_lap_event( struct lap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb)
+void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
- DEBUG( 4, __FUNCTION__ "(), EVENT = %s, STATE = %s\n",
- irlmp_event[event],
- irlmp_state[self->lap_state]);
+ DEBUG(4, __FUNCTION__ "(), EVENT = %s, STATE = %s\n",
+ irlmp_event[event],
+ irlmp_state[self->lap_state]);
- (*lap_state[ self->lap_state]) ( self, event, skb);
+ (*lap_state[self->lap_state]) (self, event, skb);
}
void irlmp_discovery_timer_expired( unsigned long data)
{
-/* struct irlmp_cb *self = ( struct irlmp_cb *) data; */
-
- DEBUG( 4, "IrLMP, discovery timer expired!\n");
+ DEBUG(4, "IrLMP, discovery timer expired!\n");
- irlmp_discovery_request( 8);
+ if (sysctl_discovery)
+ irlmp_do_discovery(sysctl_discovery_slots);
/* Restart timer */
- irlmp_start_discovery_timer( irlmp, 300);
+ irlmp_start_discovery_timer(irlmp, 300);
}
void irlmp_watchdog_timer_expired( unsigned long data)
{
struct lsap_cb *self = ( struct lsap_cb *) data;
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+
+ irlmp_do_lsap_event(self, LM_WATCHDOG_TIMEOUT, NULL);
+}
- irlmp_do_lsap_event( self, LM_WATCHDOG_TIMEOUT, NULL);
+void irlmp_idle_timer_expired(unsigned long data)
+{
+ struct lap_cb *self = (struct lap_cb *) data;
+
+ DEBUG(2, __FUNCTION__ "()\n");
+
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+
+ irlmp_do_lap_event(self, LM_LAP_IDLE_TIMEOUT, NULL);
}
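/*
 * All three *_timer_expired() handlers above do the same thing: they
 * translate a timer expiry into a state-machine event.  Below is a
 * hedged sketch of how the new idle timer would typically be armed
 * with the 2.2-era timer API; irlmp_start_idle_timer() itself is not
 * part of this diff, and the idle_timer field on struct lap_cb is an
 * assumption made for illustration.
 */
static void irlmp_start_idle_timer_sketch(struct lap_cb *self, int timeout)
{
	del_timer(&self->idle_timer);			/* never queue it twice */

	self->idle_timer.data     = (unsigned long) self;
	self->idle_timer.function = irlmp_idle_timer_expired;
	self->idle_timer.expires  = jiffies + timeout;	/* timeout is in jiffies */

	add_timer(&self->idle_timer);
}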
/*********************************************************************
@@ -180,8 +192,8 @@ void irlmp_watchdog_timer_expired( unsigned long data)
* STANDBY, The IrLAP connection does not exist.
*
*/
-static void irlmp_state_standby( struct lap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb)
+static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
{
DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self->irlap != NULL, return;);
@@ -190,11 +202,11 @@ static void irlmp_state_standby( struct lap_cb *self, IRLMP_EVENT event,
case LM_LAP_DISCOVERY_REQUEST:
/* irlmp_next_station_state( LMP_DISCOVER); */
- irlap_discovery_request( self->irlap, &irlmp->discovery_cmd);
+ irlap_discovery_request(self->irlap, &irlmp->discovery_cmd);
break;
case LM_LAP_DISCOVERY_CONFIRM:
/* irlmp_next_station_state( LMP_READY); */
- irlmp_discovery_confirm( self, self->cachelog);
+ irlmp_discovery_confirm(irlmp->cachelog);
break;
case LM_LAP_CONNECT_INDICATION:
/* It's important to switch state first, to avoid IrLMP to
@@ -207,10 +219,10 @@ static void irlmp_state_standby( struct lap_cb *self, IRLMP_EVENT event,
irlap_connect_response( self->irlap, skb);
break;
case LM_LAP_CONNECT_REQUEST:
- DEBUG( 4, "irlmp_state_standby() LS_CONNECT_REQUEST\n");
+ DEBUG(4, __FUNCTION__ "() LS_CONNECT_REQUEST\n");
/* FIXME: need to set users requested QoS */
- irlap_connect_request( self->irlap, self->daddr, NULL, 0);
+ irlap_connect_request(self->irlap, self->daddr, NULL, 0);
irlmp_next_lap_state( self, LAP_U_CONNECT);
break;
@@ -221,7 +233,7 @@ static void irlmp_state_standby( struct lap_cb *self, IRLMP_EVENT event,
irlmp_next_lap_state( self, LAP_STANDBY);
break;
default:
- DEBUG( 4, "irlmp_state_standby: Unknown event\n");
+ DEBUG(4, __FUNCTION__ "(), Unknown event\n");
break;
}
}
@@ -239,7 +251,7 @@ static void irlmp_state_u_connect( struct lap_cb *self, IRLMP_EVENT event,
struct lsap_cb *lsap;
struct lsap_cb *lsap_current;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "(), event=%s\n", irlmp_event[ event]);
switch( event) {
case LM_LAP_CONNECT_CONFIRM:
@@ -253,7 +265,7 @@ static void irlmp_state_u_connect( struct lap_cb *self, IRLMP_EVENT event,
}
break;
case LM_LAP_DISCONNECT_INDICATION:
- DEBUG( 4, __FUNCTION__ "(), IRLAP_DISCONNECT_INDICATION\n");
+ DEBUG(2, __FUNCTION__ "(), IRLAP_DISCONNECT_INDICATION\n");
irlmp_next_lap_state( self, LAP_STANDBY);
@@ -292,8 +304,8 @@ static void irlmp_state_u_connect( struct lap_cb *self, IRLMP_EVENT event,
* ACTIVE, IrLAP connection is active
*
*/
-static void irlmp_state_active( struct lap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb)
+static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
{
struct lsap_cb *lsap;
struct lsap_cb *lsap_current;
@@ -310,31 +322,48 @@ static void irlmp_state_active( struct lap_cb *self, IRLMP_EVENT event,
* notify all LSAPs using this LAP, but that should be safe to
* do anyway.
*/
- lsap = ( struct lsap_cb *) hashbin_get_first( self->lsaps);
- while ( lsap != NULL) {
- irlmp_do_lsap_event( lsap, LM_LAP_CONNECT_CONFIRM,
- skb);
- lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
+ lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
+ while (lsap != NULL) {
+ irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, skb);
+ lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
}
+ /* Needed by connect indication */
+ lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
+ while (lsap != NULL) {
+ lsap_current = lsap;
+
+ /* Be sure to stay one item ahead */
+ lsap = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps);
+ irlmp_do_lsap_event(lsap_current,
+ LM_LAP_CONNECT_CONFIRM, skb);
+ }
/* Keep state */
break;
case LM_LAP_DISCONNECT_REQUEST:
- DEBUG( 4, __FUNCTION__ "(), LM_LAP_DISCONNECT_REQUEST\n");
+ DEBUG(4, __FUNCTION__ "(), LM_LAP_DISCONNECT_REQUEST\n");
/*
- * Need to find out if we should close IrLAP or not
+ * Need to find out if we should close IrLAP or not. If there
+ * is only one LSAP connection left on this link, that LSAP
+ * must be the one that tries to close IrLAP. It will be
+ * removed later and moved to the list of unconnected LSAPs
*/
- if ( hashbin_get_size( self->lsaps) == 0) {
- DEBUG( 0, __FUNCTION__
- "(), no more LSAPs so time to disconnect IrLAP\n");
- irlmp_next_lap_state( self, LAP_STANDBY);
-
- irlap_disconnect_request( self->irlap);
+ if (hashbin_get_size(self->lsaps) == 1)
+ irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT);
+
+ break;
+ case LM_LAP_IDLE_TIMEOUT:
+ if (hashbin_get_size(self->lsaps) == 0) {
+ DEBUG(2, __FUNCTION__
+ "(), no more LSAPs so time to close IrLAP\n");
+ irlmp_next_lap_state(self, LAP_STANDBY);
+
+ irlap_disconnect_request(self->irlap);
}
break;
case LM_LAP_DISCONNECT_INDICATION:
- DEBUG( 4, __FUNCTION__ "(), IRLAP_DISCONNECT_INDICATION\n");
+ DEBUG(4, __FUNCTION__ "(), IRLAP_DISCONNECT_INDICATION\n");
irlmp_next_lap_state( self, LAP_STANDBY);
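/*
 * The LM_LAP_CONNECT_CONFIRM handler earlier in this hunk walks a
 * hashbin while the per-entry callback may unlink the entry it was
 * handed, so the loop fetches the next entry *before* dispatching
 * ("be sure to stay one item ahead").  The same idea on a plain
 * singly linked list, as a standalone sketch with illustrative types:
 */
struct node {
	struct node *next;
	int id;
};

static void visit_may_unlink(struct node *n)
{
	/* may unlink and free n as a side effect */
	(void) n;
}

static void walk_safely(struct node *head)
{
	struct node *cur = head;

	while (cur != NULL) {
		struct node *next = cur->next;	/* grab the successor first */

		visit_may_unlink(cur);		/* cur may be gone after this */
		cur = next;
	}
}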
@@ -375,8 +404,6 @@ static void irlmp_state_active( struct lap_cb *self, IRLMP_EVENT event,
static void irlmp_state_disconnected( struct lsap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
- struct lsap_cb *lsap;
-
DEBUG( 4, __FUNCTION__ "()\n");
ASSERT( self != NULL, return;);
@@ -387,36 +414,18 @@ static void irlmp_state_disconnected( struct lsap_cb *self, IRLMP_EVENT event,
DEBUG( 4, __FUNCTION__ "(), LM_CONNECT_REQUEST\n");
irlmp_next_lsap_state( self, LSAP_SETUP_PEND);
- irlmp_do_lap_event( self->lap, LM_LAP_CONNECT_REQUEST, NULL);
+ irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
/* Start watchdog timer ( 5 secs for now) */
- irlmp_start_watchdog_timer( self, 500);
+ irlmp_start_watchdog_timer(self, 500);
break;
case LM_CONNECT_INDICATION:
- irlmp_next_lsap_state( self, LSAP_CONNECT_PEND);
-
+ irlmp_next_lsap_state(self, LSAP_CONNECT_PEND);
- /*
- * Bind this LSAP to the IrLAP link where the connect was
- * received
- * FIXME: this should be done in the LAP state machine
- */
- lsap = hashbin_remove( irlmp->unconnected_lsaps,
- self->slsap_sel, NULL);
-
- ASSERT( lsap == self, return;);
-
- ASSERT( self->lap != NULL, return;);
- ASSERT( self->lap->lsaps != NULL, return;);
-
- hashbin_insert( self->lap->lsaps, (QUEUE *) self,
- self->slsap_sel, NULL);
-
- irlmp_do_lap_event( self->lap, LM_LAP_CONNECT_REQUEST, skb);
+ irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, skb);
break;
default:
- /* DEBUG( 4, "irlmp_state_disconnected: Unknown event %d\n",
- event); */
+ DEBUG( 4, __FUNCTION__ "(), Unknown event %d\n", event);
break;
}
}
@@ -430,6 +439,7 @@ static void irlmp_state_disconnected( struct lsap_cb *self, IRLMP_EVENT event,
static void irlmp_state_connect( struct lsap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
+ struct lsap_cb *lsap;
DEBUG( 4, __FUNCTION__ "()\n");
@@ -440,15 +450,30 @@ static void irlmp_state_connect( struct lsap_cb *self, IRLMP_EVENT event,
case LM_CONNECT_RESPONSE:
ASSERT( skb != NULL, return;);
- irlmp_send_lcf_pdu( self->lap, self->dlsap_sel,
- self->slsap_sel, CONNECT_CNF, skb);
+ /*
+ * Bind this LSAP to the IrLAP link where the connect was
+ * received
+ */
+ lsap = hashbin_remove(irlmp->unconnected_lsaps, (int) self,
+ NULL);
- del_timer( &self->watchdog_timer);
+ ASSERT(lsap == self, return;);
+
+ ASSERT(self->lap != NULL, return;);
+ ASSERT(self->lap->lsaps != NULL, return;);
+
+ hashbin_insert(self->lap->lsaps, (QUEUE *) self, (int) self,
+ NULL);
- irlmp_next_lsap_state( self, LSAP_DATA_TRANSFER_READY);
+ irlmp_send_lcf_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, CONNECT_CNF, skb);
+
+ del_timer(&self->watchdog_timer);
+
+ irlmp_next_lsap_state(self, LSAP_DATA_TRANSFER_READY);
break;
default:
- DEBUG( 4, "irlmp_state_connect: Unknown event\n");
+ DEBUG( 4, __FUNCTION__ "(), Unknown event\n");
break;
}
}
@@ -459,8 +484,8 @@ static void irlmp_state_connect( struct lsap_cb *self, IRLMP_EVENT event,
* CONNECT_PEND
*
*/
-static void irlmp_state_connect_pend( struct lsap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb)
+static void irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
{
DEBUG( 4, __FUNCTION__ "()\n");
@@ -472,25 +497,22 @@ static void irlmp_state_connect_pend( struct lsap_cb *self, IRLMP_EVENT event,
/* Keep state */
break;
case LM_CONNECT_RESPONSE:
- printk( KERN_WARNING
- "IrLMP CONNECT-PEND, No indication issued yet\n");
+ DEBUG(0, __FUNCTION__ "(), LM_CONNECT_RESPONSE, "
+ "no indication issued yet\n");
/* Keep state */
break;
case LM_DISCONNECT_REQUEST:
- printk( KERN_WARNING
- "IrLMP CONNECT-PEND, "
- "Not yet bound to IrLAP connection\n");
+ DEBUG(0, __FUNCTION__ "(), LM_DISCONNECT_REQUEST, "
+ "not yet bound to IrLAP connection\n");
/* Keep state */
break;
case LM_LAP_CONNECT_CONFIRM:
- DEBUG( 4, "irlmp_state_connect_pend: LS_CONNECT_CONFIRM\n");
- irlmp_next_lsap_state( self, LSAP_CONNECT);
- irlmp_connect_indication( self, skb);
+ DEBUG(4, __FUNCTION__ "(), LS_CONNECT_CONFIRM\n");
+ irlmp_next_lsap_state(self, LSAP_CONNECT);
+ irlmp_connect_indication(self, skb);
break;
-
default:
- DEBUG( 4, "irlmp_state_connect_pend: Unknown event %d\n",
- event);
+ DEBUG( 4, __FUNCTION__ "Unknown event %d\n", event);
break;
}
}
@@ -501,88 +523,86 @@ static void irlmp_state_connect_pend( struct lsap_cb *self, IRLMP_EVENT event,
* DATA_TRANSFER_READY
*
*/
-static void irlmp_state_dtr( struct lsap_cb *self, IRLMP_EVENT event,
- struct sk_buff *skb)
+static void irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
+ struct sk_buff *skb)
{
LM_REASON reason;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
- ASSERT( self->lap != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
+ ASSERT(self->lap != NULL, return;);
- switch( event) {
+ switch (event) {
+ case LM_DATA_REQUEST: /* Optimize for the common case */
+ irlmp_send_data_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, FALSE, skb);
+ /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
+ break;
+ case LM_DATA_INDICATION: /* Optimize for the common case */
+ irlmp_data_indication(self, skb);
+ /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
+ break;
+ case LM_UDATA_REQUEST:
+ ASSERT(skb != NULL, return;);
+ irlmp_send_data_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, TRUE, skb);
+ break;
+ case LM_UDATA_INDICATION:
+ irlmp_udata_indication(self, skb);
+ /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
+ break;
case LM_CONNECT_REQUEST:
- printk( KERN_WARNING
- "IrLMP DTR: Error, LSAP allready connected\n");
+ DEBUG(0, __FUNCTION__ "(), LM_CONNECT_REQUEST, "
+ "error, LSAP already connected\n");
/* Keep state */
break;
case LM_CONNECT_RESPONSE:
- printk( KERN_WARNING
- "IrLMP DTR: Error, LSAP allready connected\n");
+ DEBUG(0, __FUNCTION__ "(), LM_CONNECT_RESPONSE, "
+ "error, LSAP allready connected\n");
/* Keep state */
break;
case LM_DISCONNECT_REQUEST:
- ASSERT( skb != NULL, return;);
+ ASSERT(skb != NULL, return;);
- irlmp_send_lcf_pdu( self->lap, self->dlsap_sel,
- self->slsap_sel, DISCONNECT, skb);
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+ irlmp_send_lcf_pdu(self->lap, self->dlsap_sel,
+ self->slsap_sel, DISCONNECT, skb);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
/* Try to close the LAP connection if its still there */
- if ( self->lap) {
- DEBUG( 4, __FUNCTION__ "(), trying to close IrLAP\n");
- irlmp_do_lap_event( self->lap,
- LM_LAP_DISCONNECT_REQUEST,
- NULL);
+ if (self->lap) {
+ DEBUG(4, __FUNCTION__ "(), trying to close IrLAP\n");
+ irlmp_do_lap_event(self->lap,
+ LM_LAP_DISCONNECT_REQUEST,
+ NULL);
}
-
- break;
- case LM_DATA_REQUEST:
- ASSERT( skb != NULL, return;);
- irlmp_send_data_pdu( self->lap, self->dlsap_sel,
- self->slsap_sel, FALSE, skb);
- /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
- break;
- case LM_UDATA_REQUEST:
- ASSERT( skb != NULL, return;);
- irlmp_send_data_pdu( self->lap, self->dlsap_sel,
- self->slsap_sel, TRUE, skb);
- break;
- case LM_DATA_INDICATION:
- irlmp_data_indication( self, skb);
- /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
- break;
- case LM_UDATA_INDICATION:
- irlmp_udata_indication( self, skb);
- /* irlmp_next_lsap_state( DATA_TRANSFER_READY, info->handle);*/
break;
case LM_LAP_DISCONNECT_INDICATION:
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
- reason = irlmp_convert_lap_reason( self->lap->reason);
+ reason = irlmp_convert_lap_reason(self->lap->reason);
- irlmp_disconnect_indication( self, reason, NULL);
+ irlmp_disconnect_indication(self, reason, NULL);
break;
case LM_DISCONNECT_INDICATION:
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
- ASSERT( self->lap != NULL, return;);
- ASSERT( self->lap->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(self->lap != NULL, return;);
+ ASSERT(self->lap->magic == LMP_LAP_MAGIC, return;);
- reason = irlmp_convert_lap_reason( self->lap->reason);
+ ASSERT(skb != NULL, return;);
+ ASSERT(skb->len > 3, return;);
+ reason = skb->data[3];
/* Try to close the LAP connection */
- DEBUG( 4, __FUNCTION__ "(), trying to close IrLAP\n");
- irlmp_do_lap_event( self->lap, LM_LAP_DISCONNECT_REQUEST,
- NULL);
-
- irlmp_disconnect_indication( self, reason, skb);
+ DEBUG(4, __FUNCTION__ "(), trying to close IrLAP\n");
+ irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
+ irlmp_disconnect_indication(self, reason, skb);
break;
default:
- DEBUG( 4, __FUNCTION__ "(), Unknown event %d\n", event);
+ DEBUG(4, __FUNCTION__ "(), Unknown event %d\n", event);
break;
}
}
@@ -602,7 +622,7 @@ static void irlmp_state_setup( struct lsap_cb *self, IRLMP_EVENT event,
ASSERT( self != NULL, return;);
ASSERT( self->magic == LMP_LSAP_MAGIC, return;);
- DEBUG( 4, "irlmp_state_setup()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
switch( event) {
case LM_CONNECT_CONFIRM:
@@ -615,16 +635,28 @@ static void irlmp_state_setup( struct lsap_cb *self, IRLMP_EVENT event,
irlmp_connect_confirm( self, skb);
break;
case LM_DISCONNECT_INDICATION:
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+ DEBUG(0, __FUNCTION__ "(), this should never happen!!\n");
+ break;
+ case LM_LAP_DISCONNECT_INDICATION:
+ irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
- del_timer( &self->watchdog_timer);
+ del_timer(&self->watchdog_timer);
- ASSERT( self->lap != NULL, return;);
- ASSERT( self->lap->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(self->lap != NULL, return;);
+ ASSERT(self->lap->magic == LMP_LAP_MAGIC, return;);
- reason = irlmp_convert_lap_reason( self->lap->reason);
+ reason = irlmp_convert_lap_reason(self->lap->reason);
+
+ irlmp_disconnect_indication(self, reason, skb);
+ break;
+ case LM_WATCHDOG_TIMEOUT:
+ DEBUG( 0, __FUNCTION__ "() WATCHDOG_TIMEOUT!\n");
- irlmp_disconnect_indication( self, reason, skb);
+ ASSERT( self->lap != NULL, return;);
+ irlmp_do_lap_event( self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
+ irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+
+ irlmp_disconnect_indication( self, LM_CONNECT_FAILURE, NULL);
break;
default:
DEBUG( 4, __FUNCTION__ "(), Unknown event %d\n", event);
@@ -643,6 +675,7 @@ static void irlmp_state_setup( struct lsap_cb *self, IRLMP_EVENT event,
static void irlmp_state_setup_pend( struct lsap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
+ LM_REASON reason;
DEBUG( 4, __FUNCTION__ "()\n");
@@ -656,18 +689,23 @@ static void irlmp_state_setup_pend( struct lsap_cb *self, IRLMP_EVENT event,
self->tmp_skb);
irlmp_next_lsap_state( self, LSAP_SETUP);
break;
- case LM_DISCONNECT_INDICATION:
- del_timer( &self->watchdog_timer);
-
- irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
- break;
case LM_WATCHDOG_TIMEOUT:
DEBUG( 0, __FUNCTION__ "() WATCHDOG_TIMEOUT!\n");
- /* FIXME: should we do a disconnect_indication? */
ASSERT( self->lap != NULL, return;);
- irlmp_do_lap_event( self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
+ irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL);
irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+
+ irlmp_disconnect_indication( self, LM_CONNECT_FAILURE, NULL);
+ break;
+ case LM_LAP_DISCONNECT_INDICATION: /* LS_Disconnect.indication */
+ del_timer( &self->watchdog_timer);
+
+ irlmp_next_lsap_state( self, LSAP_DISCONNECTED);
+
+ reason = irlmp_convert_lap_reason( self->lap->reason);
+
+ irlmp_disconnect_indication( self, reason, NULL);
break;
default:
DEBUG( 4, __FUNCTION__ "(), Unknown event %d\n", event);
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 9d5ac0c35..bf1bab31e 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 02:09:59 1997
- * Modified at: Sat Jan 16 22:14:04 1999
+ * Modified at: Fri Apr 23 09:12:23 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>
@@ -32,31 +32,26 @@
#include <net/irda/timer.h>
#include <net/irda/irlmp.h>
#include <net/irda/irlmp_frame.h>
+#include <net/irda/discovery.h>
-static struct lsap_cb *irlmp_find_lsap( struct lap_cb *self, __u8 dlsap,
- __u8 slsap, int status, hashbin_t *);
+static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap,
+ __u8 slsap, int status, hashbin_t *);
-inline void irlmp_send_data_pdu( struct lap_cb *self, __u8 dlsap, __u8 slsap,
- int expedited, struct sk_buff *skb)
+inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
+ int expedited, struct sk_buff *skb)
{
__u8 *frame;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
frame = skb->data;
frame[0] = dlsap;
frame[1] = slsap;
- if ( expedited) {
+ if (expedited) {
DEBUG( 4, __FUNCTION__ "(), sending expedited data\n");
irlap_data_request( self->irlap, skb, FALSE);
- } else {
- DEBUG( 4, __FUNCTION__ "(), sending reliable data\n");
- irlap_data_request( self->irlap, skb, TRUE);
- }
+ } else
+ irlap_data_request( self->irlap, skb, TRUE);
}
/*
@@ -97,18 +92,17 @@ void irlmp_send_lcf_pdu( struct lap_cb *self, __u8 dlsap, __u8 slsap,
* Used by IrLAP to pass received data frames to IrLMP layer
*
*/
-void irlmp_link_data_indication( struct lap_cb *self, int reliable,
- struct sk_buff *skb)
+void irlmp_link_data_indication(struct lap_cb *self, int reliable,
+ struct sk_buff *skb)
{
- __u8 *fp;
- __u8 slsap_sel; /* Source (this) LSAP address */
- __u8 dlsap_sel; /* Destination LSAP address */
struct lsap_cb *lsap;
+ __u8 slsap_sel; /* Source (this) LSAP address */
+ __u8 dlsap_sel; /* Destination LSAP address */
+ __u8 *fp;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
- ASSERT( skb->len > 2, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(skb->len > 2, return;);
fp = skb->data;
@@ -123,32 +117,32 @@ void irlmp_link_data_indication( struct lap_cb *self, int reliable,
* Check if this is an incoming connection, since we must deal with
* it in a different way than other established connections.
*/
- if (( fp[0] & CONTROL_BIT) && ( fp[2] == CONNECT_CMD)) {
- DEBUG( 4,"Incoming connection, source LSAP=%d, dest LSAP=%d\n",
- slsap_sel, dlsap_sel);
+ if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) {
+ DEBUG(3,"Incoming connection, source LSAP=%d, dest LSAP=%d\n",
+ slsap_sel, dlsap_sel);
/* Try to find LSAP among the unconnected LSAPs */
- lsap = irlmp_find_lsap( self, dlsap_sel, slsap_sel,
- CONNECT_CMD, irlmp->unconnected_lsaps);
+ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
+ irlmp->unconnected_lsaps);
/* Maybe LSAP was already connected, so try one more time */
- if ( !lsap)
- lsap = irlmp_find_lsap( self, dlsap_sel, slsap_sel, 0,
- self->lsaps);
+ if (!lsap)
+ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
+ self->lsaps);
} else
- lsap = irlmp_find_lsap( self, dlsap_sel, slsap_sel, 0,
- self->lsaps);
+ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
+ self->lsaps);
- if ( lsap == NULL) {
- DEBUG( 0, "IrLMP, Sorry, no LSAP for received frame!\n");
- DEBUG( 0, __FUNCTION__
- "(), slsap_sel = %02x, dlsap_sel = %02x\n", slsap_sel,
- dlsap_sel);
- if ( fp[0] & CONTROL_BIT) {
- DEBUG( 0, __FUNCTION__
- "(), received control frame %02x\n", fp[2]);
+ if (lsap == NULL) {
+ DEBUG(0, "IrLMP, Sorry, no LSAP for received frame!\n");
+ DEBUG(0, __FUNCTION__
+ "(), slsap_sel = %02x, dlsap_sel = %02x\n", slsap_sel,
+ dlsap_sel);
+ if (fp[0] & CONTROL_BIT) {
+ DEBUG(0, __FUNCTION__
+ "(), received control frame %02x\n", fp[2]);
} else {
- DEBUG( 0, __FUNCTION__ "(), received data frame\n");
+ DEBUG(0, __FUNCTION__ "(), received data frame\n");
}
dev_kfree_skb( skb);
return;
@@ -157,19 +151,19 @@ void irlmp_link_data_indication( struct lap_cb *self, int reliable,
/*
* Check if we received a control frame?
*/
- if ( fp[0] & CONTROL_BIT) {
- switch( fp[2]) {
+ if (fp[0] & CONTROL_BIT) {
+ switch(fp[2]) {
case CONNECT_CMD:
lsap->lap = self;
- irlmp_do_lsap_event( lsap, LM_CONNECT_INDICATION, skb);
+ irlmp_do_lsap_event(lsap, LM_CONNECT_INDICATION, skb);
break;
case CONNECT_CNF:
- irlmp_do_lsap_event( lsap, LM_CONNECT_CONFIRM, skb);
+ irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb);
break;
case DISCONNECT:
DEBUG( 4, __FUNCTION__ "(), Disconnect indication!\n");
- irlmp_do_lsap_event( lsap, LM_DISCONNECT_INDICATION,
- skb);
+ irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
+ skb);
break;
case ACCESSMODE_CMD:
DEBUG( 0, "Access mode cmd not implemented!\n");
@@ -182,11 +176,18 @@ void irlmp_link_data_indication( struct lap_cb *self, int reliable,
"(), Unknown control frame %02x\n", fp[2]);
break;
}
- } else if ( reliable == LAP_RELIABLE) {
- /* Must be pure data */
- irlmp_do_lsap_event( lsap, LM_DATA_INDICATION, skb);
- } else if ( reliable == LAP_UNRELIABLE) {
- irlmp_do_lsap_event( lsap, LM_UDATA_INDICATION, skb);
+ } else if (reliable == LAP_RELIABLE) {
+ /* Optimize and bypass the state machine if possible */
+ if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
+ irlmp_data_indication(lsap, skb);
+ else
+ irlmp_do_lsap_event(lsap, LM_DATA_INDICATION, skb);
+ } else if (reliable == LAP_UNRELIABLE) {
+ /* Optimize and bypass the state machine if possible */
+ if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
+ irlmp_data_indication(lsap, skb);
+ else
+ irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb);
}
}
@@ -196,24 +197,25 @@ void irlmp_link_data_indication( struct lap_cb *self, int reliable,
* IrLAP has disconnected
*
*/
-void irlmp_link_disconnect_indication( struct lap_cb *lap,
- struct irlap_cb *irlap,
- LAP_REASON reason,
- struct sk_buff *userdata)
+void irlmp_link_disconnect_indication(struct lap_cb *lap,
+ struct irlap_cb *irlap,
+ LAP_REASON reason,
+ struct sk_buff *userdata)
{
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
- ASSERT( lap != NULL, return;);
- ASSERT( lap->magic == LMP_LAP_MAGIC, return;);
+ ASSERT(lap != NULL, return;);
+ ASSERT(lap->magic == LMP_LAP_MAGIC, return;);
lap->reason = reason;
+ lap->daddr = DEV_ADDR_ANY;
/* FIXME: must do something with the userdata if any */
/*
* Inform station state machine
*/
- irlmp_do_lap_event( lap, LM_LAP_DISCONNECT_INDICATION, NULL);
+ irlmp_do_lap_event(lap, LM_LAP_DISCONNECT_INDICATION, NULL);
}
/*
@@ -222,7 +224,8 @@ void irlmp_link_disconnect_indication( struct lap_cb *lap,
* Incoming LAP connection!
*
*/
-void irlmp_link_connect_indication( struct lap_cb *self, struct qos_info *qos,
+void irlmp_link_connect_indication( struct lap_cb *self, __u32 saddr,
+ __u32 daddr, struct qos_info *qos,
struct sk_buff *skb)
{
DEBUG( 4, __FUNCTION__ "()\n");
@@ -230,6 +233,10 @@ void irlmp_link_connect_indication( struct lap_cb *self, struct qos_info *qos,
/* Copy QoS settings for this session */
self->qos = qos;
+ /* Update destination device address */
+ self->daddr = daddr;
+ ASSERT(self->saddr == saddr, return;);
+
irlmp_do_lap_event( self, LM_LAP_CONNECT_INDICATION, skb);
}
@@ -255,6 +262,24 @@ void irlmp_link_connect_confirm( struct lap_cb *self, struct qos_info *qos,
}
/*
+ * Function irlmp_link_discovery_indication (self, log)
+ *
+ * Device is discovering us
+ *
+ */
+void irlmp_link_discovery_indication(struct lap_cb *self,
+ discovery_t *discovery)
+{
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+
+ irlmp_add_discovery(irlmp->cachelog, discovery);
+
+ /* Just handle it the same way as a discovery confirm */
+ irlmp_do_lap_event(self, LM_LAP_DISCOVERY_CONFIRM, NULL);
+}
+
+/*
* Function irlmp_link_discovery_confirm (self, log)
*
* Called by IrLAP with a list of discoveries after the discovery
@@ -262,54 +287,20 @@ void irlmp_link_connect_confirm( struct lap_cb *self, struct qos_info *qos,
* was unable to carry out the discovery request
*
*/
-void irlmp_link_discovery_confirm( struct lap_cb *self, hashbin_t *log)
+void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
{
-/* DISCOVERY *discovery; */
- hashbin_t *old_log;
+ DEBUG(4, __FUNCTION__ "()\n");
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return;);
-
- ASSERT( self->cachelog != NULL, return;);
-
- /*
- * If log is missing this means that IrLAP was unable to perform the
- * discovery, so restart discovery again with just the half timeout
- * of the normal one.
- */
- if ( !log) {
- irlmp_start_discovery_timer( irlmp, 150);
- return;
- }
-
-#if 0
- discovery = hashbin_remove_first( log);
- while ( discovery) {
- DEBUG( 0, __FUNCTION__ "(), found %s\n", discovery->info);
-
- /* Remove any old discovery of this device */
- hashbin_remove( self->cachelog, discovery->daddr, NULL);
-
- /* Insert the new one */
- hashbin_insert( self->cachelog, (QUEUE *) discovery,
- discovery->daddr, NULL);
-
- discovery = hashbin_remove_first( log);
- }
-#endif
- old_log = self->cachelog;
- self->cachelog = log;
- hashbin_delete( old_log, (FREE_FUNC) kfree);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == LMP_LAP_MAGIC, return;);
+
+ irlmp_add_discovery_log(irlmp->cachelog, log);
- irlmp_do_lap_event( self, LM_LAP_DISCOVERY_CONFIRM, NULL);
-
- DEBUG( 4, __FUNCTION__ "() -->\n");
+ irlmp_do_lap_event(self, LM_LAP_DISCOVERY_CONFIRM, NULL);
}
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
-__inline__ void irlmp_update_cache( struct lsap_cb *self)
+inline void irlmp_update_cache(struct lsap_cb *self)
{
/* Update cache entry */
irlmp->cache.dlsap_sel = self->dlsap_sel;
@@ -325,64 +316,56 @@ __inline__ void irlmp_update_cache( struct lsap_cb *self)
* Find handle associated with destination and source LSAP
*
*/
-static struct lsap_cb *irlmp_find_lsap( struct lap_cb *self, __u8 dlsap_sel,
- __u8 slsap_sel, int status,
- hashbin_t *queue)
+static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
+ __u8 slsap_sel, int status,
+ hashbin_t *queue)
{
struct lsap_cb *lsap;
- ASSERT( self != NULL, return NULL;);
- ASSERT( self->magic == LMP_LAP_MAGIC, return NULL;);
-
/*
* Optimize for the common case. We assume that the last frame
* received is in the same connection as the last one, so check in
* cache first to avoid the linear search
*/
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
- ASSERT( irlmp != NULL, return NULL;);
-
- if (( irlmp->cache.valid) &&
- ( irlmp->cache.slsap_sel == slsap_sel) &&
- ( irlmp->cache.dlsap_sel == dlsap_sel))
+ if ((irlmp->cache.valid) &&
+ (irlmp->cache.slsap_sel == slsap_sel) &&
+ (irlmp->cache.dlsap_sel == dlsap_sel))
{
- DEBUG( 4, __FUNCTION__ "(), Using cached LSAP\n");
- return ( irlmp->cache.lsap);
- }
+ return (irlmp->cache.lsap);
+ }
#endif
- lsap = ( struct lsap_cb *) hashbin_get_first( queue);
- while ( lsap != NULL) {
+ lsap = (struct lsap_cb *) hashbin_get_first(queue);
+ while (lsap != NULL) {
/*
* If this is an incoming connection, then the destination
* LSAP selector may have been specified as LM_ANY so that
* any client can connect. In that case we only need to check
* if the source LSAP (in our view!) match!
*/
- if (( status == CONNECT_CMD) &&
- ( lsap->slsap_sel == slsap_sel) &&
- ( lsap->dlsap_sel == LSAP_ANY))
+ if ((status == CONNECT_CMD) &&
+ (lsap->slsap_sel == slsap_sel) &&
+ (lsap->dlsap_sel == LSAP_ANY))
{
- DEBUG( 4,"Incoming connection: Setting dlsap_sel=%d\n",
- dlsap_sel);
lsap->dlsap_sel = dlsap_sel;
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
- irlmp_update_cache( lsap);
+ irlmp_update_cache(lsap);
#endif
return lsap;
}
/*
* Check if source LSAP and dest LSAP selectors match.
*/
- if (( lsap->slsap_sel == slsap_sel) &&
- ( lsap->dlsap_sel == dlsap_sel))
+ if ((lsap->slsap_sel == slsap_sel) &&
+ (lsap->dlsap_sel == dlsap_sel))
{
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
- irlmp_update_cache( lsap);
+ irlmp_update_cache(lsap);
#endif
return lsap;
}
- lsap = ( struct lsap_cb *) hashbin_get_next( queue);
+ lsap = ( struct lsap_cb *) hashbin_get_next(queue);
}
/* Sorry not found! */
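/*
 * irlmp_find_lsap() above short-circuits its linear search with a
 * one-entry cache keyed on the (slsap, dlsap) pair and refreshed by
 * irlmp_update_cache() on every successful search.  The same pattern
 * reduced to a standalone sketch (illustrative names, no locking
 * considered):
 */
struct entry {
	int src;
	int dst;
};

static struct entry table[4];			/* stand-in for the hashbin */

static struct { int valid; int src; int dst; struct entry *hit; } cache;

static struct entry *slow_lookup(int src, int dst)
{
	int i;

	for (i = 0; i < 4; i++)
		if (table[i].src == src && table[i].dst == dst)
			return &table[i];
	return 0;
}

static struct entry *cached_lookup(int src, int dst)
{
	struct entry *e;

	if (cache.valid && cache.src == src && cache.dst == dst)
		return cache.hit;		/* common case: same pair as last frame */

	e = slow_lookup(src, dst);
	if (e != 0) {
		cache.valid = 1;		/* remember this pair for next time */
		cache.src = src;
		cache.dst = dst;
		cache.hit = e;
	}
	return e;
}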
diff --git a/net/irda/irlpt/irlpt_cli.c b/net/irda/irlpt/irlpt_cli.c
index c98985cfb..a0fbe23d6 100644
--- a/net/irda/irlpt/irlpt_cli.c
+++ b/net/irda/irlpt/irlpt_cli.c
@@ -1,6 +1,6 @@
/*********************************************************************
*
- * Filename: irlpt.c
+ * Filename: irlpt_cli.c
* Version:
* Description:
* Status: Experimental.
@@ -43,24 +43,26 @@
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
-int irlpt_client_init(void);
-static void irlpt_client_cleanup(void);
+int irlpt_client_init(void);
static void irlpt_client_close(struct irlpt_cb *self);
-static void irlpt_client_discovery_indication( DISCOVERY *);
+static void irlpt_client_discovery_indication(discovery_t *);
-static void irlpt_client_connect_confirm( void *instance, void *sap,
- struct qos_info *qos,
- int max_seg_size,
- struct sk_buff *skb);
+static void irlpt_client_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size,
+ struct sk_buff *skb);
static void irlpt_client_disconnect_indication( void *instance, void *sap,
LM_REASON reason,
struct sk_buff *userdata);
+static void irlpt_client_expired(unsigned long data);
#if 0
static char *rcsid = "$Id: irlpt_client.c,v 1.10 1998/11/10 22:50:57 dagb Exp $";
#endif
-static char *version = "IrLPT, $Revision: 1.10 $/$Date: 1998/11/10 22:50:57 $ (Thomas Davis)";
+static char *version = "IrLPT client, v2 (Thomas Davis)";
+
+static __u32 ckey; /* IrLMP client handle */
struct file_operations client_fops = {
irlpt_seek, /* seek */
@@ -102,6 +104,9 @@ static int irlpt_client_proc_read( char *buf, char **start, off_t offset,
while( self) {
ASSERT( self != NULL, return len;);
ASSERT( self->magic == IRLPT_MAGIC, return len;);
+ if (self->in_use == FALSE) {
+ break;
+ }
len += sprintf(buf+len, "ifname: %s\n", self->ifname);
len += sprintf(buf+len, "minor: %d\n", self->ir_dev.minor);
@@ -130,19 +135,20 @@ static int irlpt_client_proc_read( char *buf, char **start, off_t offset,
break;
}
- len += sprintf(buf+len, "service_type: %s\n",
- irlpt_service_type[index]);
- len += sprintf(buf+len, "port_type: %s\n",
+ len += sprintf(buf+len, "service_type: %s, port type: %s\n",
+ irlpt_service_type[index],
irlpt_port_type[ self->porttype]);
- len += sprintf(buf+len, "daddr: 0x%08x\n", self->daddr);
- len += sprintf(buf+len, "fsm_state: %s\n",
- irlpt_client_fsm_state[self->state]);
- len += sprintf(buf+len, "retries: %d\n", self->open_retries);
- len += sprintf(buf+len, "dlsap: %d\n", self->dlsap_sel);
- len += sprintf(buf+len, "count: %d\n", self->count);
- len += sprintf(buf+len, "rx_queue: %d\n",
- skb_queue_len(&self->rx_queue));
+ len += sprintf(buf+len, "saddr: 0x%08x, daddr: 0x%08x\n",
+ self->saddr, self->daddr);
+ len += sprintf(buf+len,
+ "retries: %d, count: %d, queued packets: %d\n",
+ self->open_retries,
+ self->count, self->pkt_count);
+ len += sprintf(buf+len, "slsap: %d, dlsap: %d\n",
+ self->slsap_sel, self->dlsap_sel);
+ len += sprintf(buf+len, "fsm state: %s\n",
+ irlpt_client_fsm_state[self->state]);
len += sprintf(buf+len, "\n\n");
self = (struct irlpt_cb *) hashbin_get_next( irlpt_clients);
@@ -151,40 +157,37 @@ static int irlpt_client_proc_read( char *buf, char **start, off_t offset,
return len;
}
-struct proc_dir_entry proc_irlpt_client = {
- 0, 12, "irlpt_client",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &irlpt_client_proc_read /* get_info */,
-};
-
-extern struct proc_dir_entry proc_irda;
+extern struct proc_dir_entry *proc_irda;
#endif /* CONFIG_PROC_FS */
/*
- * Function irlpt_init (dev)
+ * Function irlpt_client_init (void)
*
* Initializes the irlpt control structure
*
*/
__initfunc(int irlpt_client_init(void))
{
+ __u16 hints;
+
DEBUG( irlpt_client_debug, "--> "__FUNCTION__ "\n");
printk( KERN_INFO "%s\n", version);
irlpt_clients = hashbin_new( HB_LOCAL);
if ( irlpt_clients == NULL) {
- printk( KERN_WARNING "IrLPT: Can't allocate hashbin!\n");
+ printk( KERN_WARNING
+ "IrLPT client: Can't allocate hashbin!\n");
return -ENOMEM;
}
-
- irlmp_register_layer( S_PRINTER, CLIENT, TRUE,
- irlpt_client_discovery_indication);
+ hints = irlmp_service_to_hint(S_PRINTER);
+ ckey = irlmp_register_client(hints, irlpt_client_discovery_indication,
+ NULL);
#ifdef CONFIG_PROC_FS
- proc_register( &proc_irda, &proc_irlpt_client);
+ create_proc_entry("irlpt_client", 0, proc_irda)->get_info
+ = irlpt_client_proc_read;
#endif /* CONFIG_PROC_FS */
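/*
 * create_proc_entry() can return NULL (for instance when the kernel is
 * out of memory), and the registration above dereferences its result
 * directly to set ->get_info.  A more defensive variant, still against
 * the same 2.2-era procfs API, would look roughly like this (sketch
 * only, not part of the patch):
 */
#ifdef CONFIG_PROC_FS
	{
		struct proc_dir_entry *ent;

		ent = create_proc_entry("irlpt_client", 0, proc_irda);
		if (ent != NULL)
			ent->get_info = irlpt_client_proc_read;
		else
			printk(KERN_WARNING
			       "IrLPT client: can't create /proc entry!\n");
	}
#endif /* CONFIG_PROC_FS */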
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
@@ -195,24 +198,22 @@ __initfunc(int irlpt_client_init(void))
#ifdef MODULE
/*
- * Function irlpt_cleanup (void)
- *
- *
+ * Function irlpt_client_cleanup (void)
*
*/
static void irlpt_client_cleanup(void)
{
DEBUG( irlpt_client_debug, "--> "__FUNCTION__ "\n");
- irlmp_unregister_layer( S_PRINTER, CLIENT);
+ irlmp_unregister_client(ckey);
/*
- * Delete hashbin and close all irlan client instances in it
+ * Delete hashbin and close all irlpt client instances in it
*/
hashbin_delete( irlpt_clients, (FREE_FUNC) irlpt_client_close);
#ifdef CONFIG_PROC_FS
- proc_unregister( &proc_irda, proc_irlpt_client.low_ino);
+ remove_proc_entry("irlpt_client", proc_irda);
#endif
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
@@ -221,26 +222,33 @@ static void irlpt_client_cleanup(void)
/*
- * Function irlpt_open (void)
- *
- * This is the entry-point which starts all the fun! Currently this
+ * Function irlpt_client_open (void)
*
*/
static struct irlpt_cb *irlpt_client_open( __u32 daddr)
{
+ struct irmanager_event mgr_event;
struct irlpt_cb *self;
DEBUG( irlpt_client_debug, "--> "__FUNCTION__ "\n");
- self = kmalloc(sizeof(struct irlpt_cb), GFP_ATOMIC);
- if (self == NULL)
- return NULL;
+ self = (struct irlpt_cb *) hashbin_find(irlpt_clients, daddr, NULL);
+
+ if (self == NULL) {
+ self = kmalloc(sizeof(struct irlpt_cb), GFP_ATOMIC);
+ if (self == NULL)
+ return NULL;
+
+ memset(self, 0, sizeof(struct irlpt_cb));
+
+ ASSERT( self != NULL, return NULL;);
- memset(self, 0, sizeof(struct irlpt_cb));
+ sprintf(self->ifname, "irlpt%d",
+ hashbin_get_size(irlpt_clients));
- ASSERT( self != NULL, return NULL;);
+ hashbin_insert( irlpt_clients, (QUEUE *) self, daddr, NULL);
+ }
- sprintf(self->ifname, "irlpt%d", hashbin_get_size(irlpt_clients));
self->ir_dev.minor = MISC_DYNAMIC_MINOR;
self->ir_dev.name = self->ifname;
self->ir_dev.fops = &client_fops;
@@ -251,14 +259,18 @@ static struct irlpt_cb *irlpt_client_open( __u32 daddr)
self->in_use = TRUE;
self->servicetype = IRLPT_THREE_WIRE_RAW;
self->porttype = IRLPT_SERIAL;
+ self->do_event = irlpt_client_do_event;
skb_queue_head_init(&self->rx_queue);
irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
- hashbin_insert( irlpt_clients, (QUEUE *) self, daddr, NULL);
+ /* Tell irmanager to create /dev/irlpt<X> */
+ mgr_event.event = EVENT_IRLPT_START;
+ sprintf(mgr_event.devname, "%s", self->ifname);
+ irmanager_notify(&mgr_event);
- /* MOD_INC_USE_COUNT; */
+ MOD_INC_USE_COUNT;
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
@@ -272,6 +284,7 @@ static struct irlpt_cb *irlpt_client_open( __u32 daddr)
*/
static void irlpt_client_close( struct irlpt_cb *self)
{
+ struct irmanager_event mgr_event;
struct sk_buff *skb;
DEBUG( irlpt_client_debug, "--> " __FUNCTION__ "\n");
@@ -279,18 +292,21 @@ static void irlpt_client_close( struct irlpt_cb *self)
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
+ /* Tell irmanager to remove /dev/irlpt<X> */
+ mgr_event.event = EVENT_IRLPT_STOP;
+ sprintf(mgr_event.devname, "%s", self->ifname);
+ irmanager_notify(&mgr_event);
+
while (( skb = skb_dequeue(&self->rx_queue)) != NULL) {
- DEBUG(3, "irlpt_client_close: freeing SKB\n");
+ DEBUG(irlpt_client_debug,
+ __FUNCTION__ ": freeing SKB\n");
dev_kfree_skb( skb);
}
misc_deregister(&self->ir_dev);
+ self->in_use = FALSE;
- self->magic = ~IRLPT_MAGIC;
-
- kfree( self);
-
- /* MOD_DEC_USE_COUNT; */
+ MOD_DEC_USE_COUNT;
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
}
@@ -302,53 +318,55 @@ static void irlpt_client_close( struct irlpt_cb *self)
* device it is, and which services it has.
*
*/
-static void irlpt_client_discovery_indication( DISCOVERY *discovery)
+static void irlpt_client_discovery_indication(discovery_t *discovery)
{
struct irlpt_info info;
struct irlpt_cb *self;
- __u32 daddr;
+ __u32 daddr; /* address of remote printer */
+ __u32 saddr; /* address of local link where it was discovered */
DEBUG( irlpt_client_debug, "--> " __FUNCTION__ "\n");
ASSERT( irlpt_clients != NULL, return;);
ASSERT( discovery != NULL, return;);
- daddr = discovery->daddr;
+ daddr = info.daddr = discovery->daddr;
+ saddr = info.saddr = discovery->saddr;
/*
* Check if an instance is already dealing with this device
* (daddr)
*/
self = (struct irlpt_cb *) hashbin_find( irlpt_clients, daddr, NULL);
- if ( self != NULL) {
- ASSERT( self->magic == IRLPT_MAGIC, return;);
- if ( self->state == IRLPT_CLIENT_IDLE) {
- irlpt_client_do_event( self,
- IRLPT_DISCOVERY_INDICATION,
- NULL, &info);
+ if (self == NULL || self->in_use == FALSE) {
+ DEBUG( irlpt_client_debug, __FUNCTION__
+ ": daddr 0x%08x not found or was closed\n", daddr);
+ /*
+ * We have no instance for daddr, so time to start a new
+ * instance. First we must find a free entry in master array
+ */
+ if (( self = irlpt_client_open( daddr)) == NULL) {
+ DEBUG(irlpt_client_debug, __FUNCTION__
+ ": failed!\n");
+ return;
}
- return;
- }
-
-
- /*
- * We have no instance for daddr, so time to start a new instance.
- * First we must find a free entry in master array
- */
- if (( self = irlpt_client_open( daddr)) == NULL) {
- DEBUG(irlpt_client_debug, __FUNCTION__
- ":irlpt_client_open failed!\n");
}
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRLPT_MAGIC, return;);
- self->daddr = info.daddr = daddr;
+ self->daddr = daddr;
+ self->saddr = saddr;
+ self->timeout = irlpt_client_expired;
+
+ irda_start_timer( &self->lpt_timer, 5000, (unsigned long) self,
+ self->timeout);
- if (self->state == IRLPT_CLIENT_IDLE) {
- irlpt_client_do_event( self, IRLPT_DISCOVERY_INDICATION,
- NULL, &info);
- }
+#if 0
+ /* changed to wake up when we get connected; that way,
+ if the connection drops, we can easily kill the link. */
+ wake_up_interruptible( &self->write_wait);
+#endif
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
}
@@ -357,7 +375,8 @@ static void irlpt_client_discovery_indication( DISCOVERY *discovery)
* Function irlpt_client_disconnect_indication (instance, sap, reason, skb)
*
*/
-static void irlpt_client_disconnect_indication( void *instance, void *sap,
+static void irlpt_client_disconnect_indication( void *instance,
+ void *sap,
LM_REASON reason,
struct sk_buff *skb)
{
@@ -373,8 +392,8 @@ static void irlpt_client_disconnect_indication( void *instance, void *sap,
info.daddr = self->daddr;
- DEBUG( irlpt_client_debug, __FUNCTION__
- ": reason=%d (%s), peersap=%d\n",
+ DEBUG( irlpt_client_debug,
+ __FUNCTION__ ": reason=%d (%s), peersap=%d\n",
reason, irlpt_reasons[reason], self->dlsap_sel);
self->connected = IRLPT_DISCONNECTED;
@@ -396,10 +415,10 @@ static void irlpt_client_disconnect_indication( void *instance, void *sap,
*
* LSAP connection confirmed!
*/
-static void irlpt_client_connect_confirm( void *instance, void *sap,
- struct qos_info *qos,
- int max_sdu_size,
- struct sk_buff *skb)
+static void irlpt_client_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_sdu_size,
+ struct sk_buff *skb)
{
struct irlpt_info info;
struct irlpt_cb *self;
@@ -413,6 +432,7 @@ static void irlpt_client_connect_confirm( void *instance, void *sap,
info.daddr = self->daddr;
+#if 0
/*
* Check if we have got some QoS parameters back! This should be the
* negotiated QoS for the link.
@@ -421,6 +441,7 @@ static void irlpt_client_connect_confirm( void *instance, void *sap,
DEBUG( irlpt_client_debug, __FUNCTION__ ": Frame Size: %d\n",
qos->data_size.value);
}
+#endif
self->irlap_data_size = (qos->data_size.value - IRLPT_MAX_HEADER);
self->connected = TRUE;
@@ -440,20 +461,20 @@ static void irlpt_client_connect_confirm( void *instance, void *sap,
* This function gets the data that is received on the data channel
*
*/
-static void irlpt_client_data_indication( void *instance, void *sap,
- struct sk_buff *skb)
+static int irlpt_client_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
{
struct irlpt_cb *self;
DEBUG( irlpt_client_debug, "--> " __FUNCTION__ "\n");
- ASSERT( skb != NULL, return;);
+ ASSERT( skb != NULL, return -1;);
DEBUG( irlpt_client_debug, __FUNCTION__ ": len=%d\n", (int) skb->len);
self = ( struct irlpt_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLPT_MAGIC, return;);
+ ASSERT( self != NULL, return -1;);
+ ASSERT( self->magic == IRLPT_MAGIC, return -1;);
#if 1
{
int i;
@@ -481,6 +502,8 @@ static void irlpt_client_data_indication( void *instance, void *sap,
/* } */
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
+
+ return 0;
}
/*
@@ -489,9 +512,8 @@ static void irlpt_client_data_indication( void *instance, void *sap,
* Fixed to match changes in iriap.h, DB.
*
*/
-
-void irlpt_client_get_value_confirm(__u16 obj_id, struct ias_value *value,
- void *priv)
+void irlpt_client_get_value_confirm(int result, __u16 obj_id,
+ struct ias_value *value, void *priv)
{
struct irlpt_info info;
struct irlpt_cb *self;
@@ -505,10 +527,13 @@ void irlpt_client_get_value_confirm(__u16 obj_id, struct ias_value *value,
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
- /* can't stop here.. if we get a bad obj, must tell the state
- machine that!
- ASSERT( type == IAS_INTEGER, return;);
- */
+ /* Check if request succeeded */
+ if (result != IAS_SUCCESS) {
+ DEBUG( 0, __FUNCTION__ "(), got NULL value!\n");
+ irlpt_client_do_event( self, IAS_PROVIDER_NOT_AVAIL, NULL,
+ &info);
+ return;
+ }
if ( value->type == IAS_INTEGER && value->t.integer != -1) {
info.dlsap_sel = value->t.integer;
@@ -518,11 +543,10 @@ void irlpt_client_get_value_confirm(__u16 obj_id, struct ias_value *value,
": obj_id = %d, value = %d\n",
obj_id, value->t.integer);
- irlpt_client_do_event( self, IAS_PROVIDER_AVAIL,
- NULL, &info);
+ irlpt_client_do_event( self, IAS_PROVIDER_AVAIL, NULL, &info);
} else
- irlpt_client_do_event( self, IAS_PROVIDER_NOT_AVAIL,
- NULL, &info);
+ irlpt_client_do_event( self, IAS_PROVIDER_NOT_AVAIL, NULL,
+ &info);
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
}
@@ -536,9 +560,12 @@ void irlpt_client_connect_request( struct irlpt_cb *self)
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
+ irda_notify_init( &lpt_notify);
+
lpt_notify.connect_confirm = irlpt_client_connect_confirm;
lpt_notify.disconnect_indication = irlpt_client_disconnect_indication;
lpt_notify.data_indication = irlpt_client_data_indication;
+ strcpy( lpt_notify.name, "IrLPT client");
lpt_notify.instance = self;
self->lsap = irlmp_open_lsap( LSAP_ANY, &lpt_notify);
@@ -549,17 +576,51 @@ void irlpt_client_connect_request( struct irlpt_cb *self)
DEBUG( irlpt_client_debug, __FUNCTION__
": issue THREE_WIRE_RAW connect\n");
irlmp_connect_request( self->lsap, self->dlsap_sel,
- self->daddr, NULL, NULL);
+ self->saddr, self->daddr, NULL, NULL);
}
DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
}
+static void irlpt_client_expired(unsigned long data)
+{
+ struct irlpt_cb *self = (struct irlpt_cb *) data;
+ struct sk_buff *skb;
+
+ DEBUG( irlpt_client_debug, "--> " __FUNCTION__ "\n");
+
+ DEBUG( irlpt_client_debug, __FUNCTION__
+ ": removing irlpt_cb!\n");
+
+ ASSERT(self != NULL, return; );
+ ASSERT(self->magic == IRLPT_MAGIC, return;);
+
+ if (self->state == IRLPT_CLIENT_CONN) {
+ skb = dev_alloc_skb(64);
+ if (skb == NULL) {
+ DEBUG( 0, __FUNCTION__ "(: Could not allocate an "
+ "sk_buff of length %d\n", 64);
+ return;
+ }
+
+ skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ irlmp_disconnect_request(self->lsap, skb);
+ DEBUG(irlpt_client_debug, __FUNCTION__
+ ": irlmp_close_slap(self->lsap)\n");
+ irlmp_close_lsap(self->lsap);
+ }
+
+ irlpt_client_close(self);
+
+ DEBUG( irlpt_client_debug, __FUNCTION__ " -->\n");
+}
#ifdef MODULE
MODULE_AUTHOR("Thomas Davis <ratbert@radiks.net>");
-MODULE_DESCRIPTION("The Linux IrDA/IrLPT protocol");
+MODULE_DESCRIPTION("The Linux IrDA/IrLPT client protocol");
+MODULE_PARM(irlpt_client_debug,"1i");
+MODULE_PARM(irlpt_client_fsm_debug,"1i");
/*
* Function init_module (void)
@@ -570,11 +631,11 @@ MODULE_DESCRIPTION("The Linux IrDA/IrLPT protocol");
int init_module(void)
{
- DEBUG( irlpt_client_debug, "--> irlpt client: init_module\n");
+ DEBUG( irlpt_client_debug, "--> IrLPT client: init_module\n");
irlpt_client_init();
- DEBUG( irlpt_client_debug, "irlpt client: init_module -->\n");
+ DEBUG( irlpt_client_debug, "IrLPT client: init_module -->\n");
return 0;
}
@@ -587,13 +648,13 @@ int init_module(void)
*/
void cleanup_module(void)
{
- DEBUG( irlpt_client_debug, "--> irlpt client: cleanup_module\n");
+ DEBUG( irlpt_client_debug, "--> IrLPT client: cleanup_module\n");
/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
/* Free some memory */
irlpt_client_cleanup();
- DEBUG( irlpt_client_debug, "irlpt client: cleanup_module -->\n");
+ DEBUG( irlpt_client_debug, "IrLPT client: cleanup_module -->\n");
}
#endif /* MODULE */
diff --git a/net/irda/irlpt/irlpt_cli_fsm.c b/net/irda/irlpt/irlpt_cli_fsm.c
index 6362756a2..75598742a 100644
--- a/net/irda/irlpt/irlpt_cli_fsm.c
+++ b/net/irda/irlpt/irlpt_cli_fsm.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Jan 12 11:06:00 1999
- * Modified at: Tue Jan 12 11:14:22 1999
+ * Modified at: Tue Jan 26 12:02:31 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>
@@ -35,50 +35,53 @@
static char *rcsid = "$Id: irlpt_client_fsm.c,v 1.3 1998/10/05 05:46:44 ratbert Exp $";
#endif
-static int irlpt_client_state_idle ( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_idle ( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info);
-static int irlpt_client_state_query ( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_query ( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info);
static int irlpt_client_state_ready ( struct irlpt_cb *self,
IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info);
-static int irlpt_client_state_waiti ( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_waiti ( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info);
-static int irlpt_client_state_waitr ( struct irlpt_cb *self, IRLPT_EVENT event,
- struct sk_buff *skb,
- struct irlpt_info *info);
-static int irlpt_client_state_conn ( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_conn ( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info);
-int irlpt_client_fsm_debug = 3;
+int irlpt_client_fsm_debug = 4;
-int (*irlpt_client_state[])( struct irlpt_cb *self, IRLPT_EVENT event,
- struct sk_buff *skb,
- struct irlpt_info *info) =
+int (*irlpt_client_state[])( struct irlpt_cb *self,
+ IRLPT_EVENT event,
+ struct sk_buff *skb,
+ struct irlpt_info *info) =
{
irlpt_client_state_idle,
irlpt_client_state_query,
irlpt_client_state_ready,
irlpt_client_state_waiti,
- irlpt_client_state_waitr,
irlpt_client_state_conn,
};
-void irlpt_client_do_event( struct irlpt_cb *self, IRLPT_EVENT event,
- struct sk_buff *skb,
- struct irlpt_info *info)
+void irlpt_client_do_event( struct irlpt_cb *self,
+ IRLPT_EVENT event,
+ struct sk_buff *skb,
+ struct irlpt_info *info)
{
- DEBUG( irlpt_client_fsm_debug,"--> " __FUNCTION__ "\n");
+ DEBUG( irlpt_client_fsm_debug, "--> " __FUNCTION__ "\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": STATE = %s, EVENT = %s\n",
+ DEBUG( irlpt_client_fsm_debug,
+ __FUNCTION__ ": STATE = %s, EVENT = %s\n",
irlpt_server_fsm_state[self->state], irlpt_fsm_event[event]);
(*irlpt_client_state[ self->state]) ( self, event, skb, info);
@@ -86,9 +89,10 @@ void irlpt_client_do_event( struct irlpt_cb *self, IRLPT_EVENT event,
DEBUG( irlpt_client_fsm_debug, __FUNCTION__ " -->\n");
}
-void irlpt_client_next_state( struct irlpt_cb *self, IRLPT_CLIENT_STATE state)
+void irlpt_client_next_state( struct irlpt_cb *self,
+ IRLPT_CLIENT_STATE state)
{
- DEBUG( irlpt_client_fsm_debug,"--> " __FUNCTION__ ":\n");
+ DEBUG( irlpt_client_fsm_debug, "--> " __FUNCTION__ ":\n");
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
@@ -102,16 +106,17 @@ void irlpt_client_next_state( struct irlpt_cb *self, IRLPT_CLIENT_STATE state)
}
/*
- * Function client_state_idle (event, skb, info)
+ * Function irlpt_client_state_idle (self, event, skb, info)
*
* IDLE, We are waiting for an indication that there is a provider
* available.
*/
-static int irlpt_client_state_idle( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_idle( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info)
{
- DEBUG( irlpt_client_fsm_debug,"--> " __FUNCTION__ ":\n");
+ DEBUG( irlpt_client_fsm_debug, "--> " __FUNCTION__ ":\n");
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == IRLPT_MAGIC, return -1;);
@@ -120,11 +125,13 @@ static int irlpt_client_state_idle( struct irlpt_cb *self, IRLPT_EVENT event,
case IRLPT_DISCOVERY_INDICATION:
/* Get some values from peer IAS */
DEBUG( irlpt_client_fsm_debug, __FUNCTION__
- ": IRLPT_DISCOVERY_INDICATION, sending getvaluebyclass command..\n");
- iriap_getvaluebyclass_request( info->daddr,
- "IrLPT", "IrDA:IrLMP:LsapSel",
- irlpt_client_get_value_confirm,
- (void *) self);
+ ": IRLPT_DISCOVERY_INDICATION, "
+ "sending getvaluebyclass command..\n");
+ iriap_getvaluebyclass_request(
+ "IrLPT", "IrDA:IrLMP:LsapSel",
+ info->saddr, info->daddr,
+ irlpt_client_get_value_confirm,
+ (void *) self);
irlpt_client_next_state( self, IRLPT_CLIENT_QUERY);
break;
@@ -145,36 +152,53 @@ static int irlpt_client_state_idle( struct irlpt_cb *self, IRLPT_EVENT event,
}
/*
- * Function client_state_query
+ * Function irlpt_client_state_query
*
* QUERY, We have queried the remote IAS and are ready to connect
* to provider, just waiting for the confirm.
*
*/
-static int irlpt_client_state_query( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_query( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info)
{
- DEBUG( irlpt_client_fsm_debug,"--> " __FUNCTION__ ":\n");
+ DEBUG( irlpt_client_fsm_debug, "--> " __FUNCTION__ ":\n");
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == IRLPT_MAGIC, return -1;);
switch( event) {
case IAS_PROVIDER_AVAIL:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": IAS_PROVIDER_AVAIL\n");
+ DEBUG( irlpt_client_fsm_debug,
+ __FUNCTION__ ": IAS_PROVIDER_AVAIL\n");
self->open_retries = 0;
irlpt_client_next_state( self, IRLPT_CLIENT_READY);
- irlpt_client_do_event( self, IRLPT_CONNECT_REQUEST, NULL, NULL);
+ irlpt_client_do_event( self, IRLPT_CONNECT_REQUEST,
+ NULL, NULL);
break;
case IAS_PROVIDER_NOT_AVAIL:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": IAS_PROVIDER_NOT_AVAIL\n");
+ DEBUG( irlpt_client_fsm_debug,
+ __FUNCTION__ ": IAS_PROVIDER_NOT_AVAIL\n");
irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
+ DEBUG(irlpt_client_fsm_debug, __FUNCTION__
+ ": waking any sleepers..\n");
+ wake_up_interruptible(&self->read_wait);
break;
+ case LMP_DISCONNECT:
+ case LAP_DISCONNECT:
+ DEBUG( irlpt_client_fsm_debug, __FUNCTION__
+ ": LMP_DISCONNECT or LAP_DISCONNECT\n");
+ irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
+ DEBUG(irlpt_client_fsm_debug, __FUNCTION__
+ ": waking any sleepers..\n");
+ wake_up_interruptible(&self->read_wait);
+ break;
default:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": Unknown event %d (%s)\n",
+ DEBUG( irlpt_client_fsm_debug,
+ __FUNCTION__ ": Unknown event %d (%s)\n",
event, irlpt_fsm_event[event]);
break;
}
@@ -189,11 +213,12 @@ static int irlpt_client_state_query( struct irlpt_cb *self, IRLPT_EVENT event,
}
/*
- * Function client_state_info
+ * Function irlpt_client_state_info
*
* INFO, We have issued a GetInfo command and are awaiting a reply.
*/
-static int irlpt_client_state_ready( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_ready( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info)
{
@@ -204,8 +229,8 @@ static int irlpt_client_state_ready( struct irlpt_cb *self, IRLPT_EVENT event,
switch( event) {
case IRLPT_CONNECT_REQUEST:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__
- ": IRLPT_CONNECT_REQUEST\n");
+ DEBUG( irlpt_client_fsm_debug,
+ __FUNCTION__ ": IRLPT_CONNECT_REQUEST\n");
irlpt_client_connect_request(self);
irlpt_client_next_state( self, IRLPT_CLIENT_WAITI);
break;
@@ -213,6 +238,9 @@ static int irlpt_client_state_ready( struct irlpt_cb *self, IRLPT_EVENT event,
case LAP_DISCONNECT:
DEBUG( irlpt_client_fsm_debug, __FUNCTION__
": LMP_DISCONNECT or LAP_DISCONNECT\n");
+ DEBUG(irlpt_client_fsm_debug, __FUNCTION__
+ ": waking any sleepers..\n");
+ wake_up_interruptible(&self->read_wait);
irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
break;
default:
@@ -233,11 +261,12 @@ static int irlpt_client_state_ready( struct irlpt_cb *self, IRLPT_EVENT event,
/*
- * Function client_state_waiti
+ * Function irlpt_client_state_waiti
*
*
*/
-static int irlpt_client_state_waiti( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_waiti( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info)
{
@@ -250,10 +279,17 @@ static int irlpt_client_state_waiti( struct irlpt_cb *self, IRLPT_EVENT event,
case LMP_CONNECT:
DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": LMP_CONNECT\n");
irlpt_client_next_state(self, IRLPT_CLIENT_CONN);
+ DEBUG(irlpt_client_fsm_debug, __FUNCTION__
+ ": waking any sleepers..\n");
+ wake_up_interruptible(&self->read_wait);
break;
case LAP_DISCONNECT:
case LMP_DISCONNECT:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": LMP_DISCONNECT\n");
+ DEBUG( irlpt_client_fsm_debug,
+ __FUNCTION__ ": LMP_DISCONNECT\n");
+ DEBUG(irlpt_client_fsm_debug, __FUNCTION__
+ ": waking any sleepers..\n");
+ wake_up_interruptible(&self->read_wait);
irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
break;
@@ -274,63 +310,18 @@ static int irlpt_client_state_waiti( struct irlpt_cb *self, IRLPT_EVENT event,
}
/*
- * Function client_state_waitr
- *
- *
- */
-static int irlpt_client_state_waitr( struct irlpt_cb *self, IRLPT_EVENT event,
- struct sk_buff *skb,
- struct irlpt_info *info)
-{
- DEBUG( irlpt_client_fsm_debug,"--> " __FUNCTION__ ":\n");
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRLPT_MAGIC, return -1;);
-
- switch( event) {
- case LMP_CONNECT:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": LMP_CONNECT\n");
- irlpt_client_next_state(self, IRLPT_CLIENT_CONN);
- break;
-
- case LMP_DISCONNECT:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": LMP_DISCONNECT\n");
- irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
- break;
-
- case LAP_DISCONNECT:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ ": LAP_DISCONNECT\n");
- irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
- break;
-
- default:
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__
- ": Unknown event %d, (%s)\n",
- event, irlpt_fsm_event[event]);
- break;
- }
-
- if ( skb) {
- dev_kfree_skb( skb);
- }
-
- DEBUG( irlpt_client_fsm_debug, __FUNCTION__ " -->\n");
-
- return 0;
-}
-
-/*
- * Function client_state_conn (event, skb, info)
+ * Function irlpt_client_state_conn (self, event, skb, info)
*
* CONN, We have connected to a provider but have not issued any
* commands yet.
*
*/
-static int irlpt_client_state_conn( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_client_state_conn( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info)
{
- DEBUG( irlpt_client_fsm_debug,"--> " __FUNCTION__ ":\n");
+ DEBUG( irlpt_client_fsm_debug, "--> " __FUNCTION__ ":\n");
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == IRLPT_MAGIC, return -1;);
@@ -346,6 +337,10 @@ static int irlpt_client_state_conn( struct irlpt_cb *self, IRLPT_EVENT event,
case LAP_DISCONNECT:
DEBUG( irlpt_client_fsm_debug, __FUNCTION__
": LMP_DISCONNECT/LAP_DISCONNECT\n");
+ DEBUG(irlpt_client_fsm_debug, __FUNCTION__
+ ": waking any sleepers..\n");
+ wake_up_interruptible(&self->read_wait);
+ irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
irlpt_client_next_state( self, IRLPT_CLIENT_IDLE);
break;
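
The recurring change in these client-FSM states is that every LMP_DISCONNECT/LAP_DISCONNECT arm now wakes whatever is blocked on self->read_wait before dropping back to IRLPT_CLIENT_IDLE, so a process sleeping in irlpt_open() or irlpt_read() is not left hanging when the peer disappears. Condensed, the pattern looks like the sketch below; it is assembled from the names visible in this diff and is illustrative only, not the literal file contents (the helper name is hypothetical).

	static int irlpt_client_handle_disconnect(struct irlpt_cb *self,
						  struct sk_buff *skb)
	{
		/* Wake anything sleeping in irlpt_open()/irlpt_read(); the
		 * sleeper re-checks self->state once it runs again. */
		wake_up_interruptible(&self->read_wait);

		/* Fall back to idle and wait for the next discovery. */
		irlpt_client_next_state(self, IRLPT_CLIENT_IDLE);

		if (skb)
			dev_kfree_skb(skb);

		return 0;
	}
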
diff --git a/net/irda/irlpt/irlpt_common.c b/net/irda/irlpt/irlpt_common.c
index 886eb30a0..b4512736a 100644
--- a/net/irda/irlpt/irlpt_common.c
+++ b/net/irda/irlpt/irlpt_common.c
@@ -68,7 +68,7 @@ char *irlpt_connected[] = {
char *irlpt_reasons[] = {
"SERVICE_CLOSE", /* Service has closed the connection */
- "DISC_INDICATION", /* Received a disconnect request from peer entity*/
+ "DISC_INDICATION", /* Received disconnect request from peer entity*/
"NO_RESPONSE", /* To many retransmits without response */
"DEADLOCK_DETECTED", /* To many retransmits _with_ response */
"FOUND_NONE", /* No devices were discovered */
@@ -81,7 +81,6 @@ char *irlpt_client_fsm_state[] = {
"IRLPT_CLIENT_QUERY",
"IRLPT_CLIENT_READY",
"IRLPT_CLIENT_WAITI",
- "IRLPT_CLIENT_WAITR",
"IRLPT_CLIENT_CONN"
};
@@ -108,9 +107,8 @@ char *irlpt_fsm_event[] = {
hashbin_t *irlpt_clients = NULL;
struct irlpt_cb *irlpt_server = NULL;
int irlpt_common_debug = 4; /* want to change this? please don't!
- use irlpt_common_debug=3 on the command line! */
-
-static struct wait_queue *irlpt_wait;
+ use irlpt_common_debug=3 on the
+ command line! */
#if 0
static char *rcsid = "$Id: irlpt_common.c,v 1.6 1998/11/10 22:50:58 dagb Exp $";
@@ -152,7 +150,6 @@ ssize_t irlpt_read( struct file *file, char *buffer, size_t count, loff_t
char *ptr = buffer;
struct irlpt_cb *self;
struct sk_buff *skb = NULL;
- struct wait_queue wait = { current, NULL };
DEBUG(irlpt_common_debug, "--> " __FUNCTION__ "\n");
@@ -170,11 +167,12 @@ ssize_t irlpt_read( struct file *file, char *buffer, size_t count, loff_t
switch (self->eof) {
case LM_USER_REQUEST:
self->eof = FALSE;
- DEBUG(3, "irlpt_read: returning 0\n");
+ DEBUG(irlpt_common_debug,
+ __FUNCTION__ ": returning 0\n");
return 0;
case LM_LAP_DISCONNECT:
self->eof = FALSE;
- return -EIO;
+ return 0;
case LM_LAP_RESET:
self->eof = FALSE;
return -ECONNRESET;
@@ -208,11 +206,7 @@ ssize_t irlpt_read( struct file *file, char *buffer, size_t count, loff_t
len, count, self->eof);
if (!signal_pending(current) && !self->eof) {
- add_wait_queue(&irlpt_wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- current->state = TASK_RUNNING;
- remove_wait_queue(&irlpt_wait, &wait);
+ interruptible_sleep_on(&self->read_wait);
} else
break;
}
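
The net effect of this hunk is to retire the file-global irlpt_wait queue and the hand-rolled add_wait_queue()/schedule()/remove_wait_queue() sequence in favour of interruptible_sleep_on() on the per-instance read_wait queue, which is the same queue the data and disconnect indications now wake. Condensed to its two halves (a sketch assembled from pieces elsewhere in this diff, not the literal loop from irlpt_read()):

	/* Reader side (irlpt_read): nothing queued, so block until an
	 * indication wakes us, unless a signal is pending or EOF was set. */
	if (!signal_pending(current) && !self->eof)
		interruptible_sleep_on(&self->read_wait);

	/* Producer side (data indication): queue the frame, wake the reader. */
	skb_queue_tail(&self->rx_queue, skb);
	wake_up_interruptible(&self->read_wait);
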
@@ -228,7 +222,6 @@ ssize_t irlpt_write(struct file *file, const char *buffer,
{
struct irlpt_cb *self;
struct sk_buff *skb;
- struct wait_queue wait = { current, NULL };
DEBUG(irlpt_common_debug, "--> " __FUNCTION__ "\n");
@@ -240,26 +233,25 @@ ssize_t irlpt_write(struct file *file, const char *buffer,
DEBUG( irlpt_common_debug, __FUNCTION__
": count = %d\n", count);
- if (self->state != IRLPT_CLIENT_CONN) {
+ DEBUG( irlpt_common_debug, __FUNCTION__
+ ": pkt_count = %d\n", self->pkt_count);
+ if (self->pkt_count > 8) {
DEBUG( irlpt_common_debug, __FUNCTION__
- ": state != IRLPT_CONN (possible link problems?)\n");
- return -ENOLINK;
+ ": too many outstanding buffers, going to sleep\n");
+ interruptible_sleep_on(&self->write_wait);
}
DEBUG( irlpt_common_debug, __FUNCTION__
": pkt_count = %d\n", self->pkt_count);
- if (self->pkt_count > 8) {
+
+ if (self->state != IRLPT_CLIENT_CONN) {
DEBUG( irlpt_common_debug, __FUNCTION__
- ": too many outstanding buffers, going to sleep\n");
- add_wait_queue(&self->write_wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- current->state = TASK_RUNNING;
- remove_wait_queue(&self->write_wait, &wait);
+ ": state != IRLPT_CONN (possible link problems?)\n");
+ return -ENOLINK;
}
DEBUG( irlpt_common_debug, __FUNCTION__
- ":count = %d, irlap_data_size = %d, IRLPT_MAX_HEADER = %d\n",
+ ": count = %d, irlap_data_size = %d, IRLPT_MAX_HEADER = %d\n",
count, self->irlap_data_size, IRLPT_MAX_HEADER);
if (count > (self->irlap_data_size - IRLPT_MAX_HEADER)) {
@@ -272,7 +264,8 @@ ssize_t irlpt_write(struct file *file, const char *buffer,
skb = dev_alloc_skb(count + IRLPT_MAX_HEADER);
if ( skb == NULL) {
- printk( KERN_INFO __FUNCTION__ ": couldn't allocate skbuff!\n");
+ printk( KERN_INFO
+ __FUNCTION__ ": couldn't allocate skbuff!\n");
return 0;
}
@@ -292,6 +285,9 @@ ssize_t irlpt_write(struct file *file, const char *buffer,
irlmp_data_request(self->lsap, skb);
+ irda_start_timer( &self->lpt_timer, 5000, (unsigned long) self,
+ self->timeout);
+
DEBUG(irlpt_common_debug, __FUNCTION__ " -->\n");
return(count);
@@ -305,60 +301,13 @@ loff_t irlpt_seek( struct file *file, loff_t offset, int count)
return -ESPIPE;
}
-#if 0
-
/*
- * Function irlpt_select (inode, filp, mode, table)
- *
- * Implementation for the select() call
- *
- */
-int irlpt_select( struct inode *inode, struct file *filp, int mode,
- select_table *table)
-{
- struct irlpt_cb *self;
-
- DEBUG(irlpt_common_debug, "--> " __FUNCTION__ "\n");
-
- self = irlpt_find_handle(MINOR( inode->i_rdev));
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IRLPT_MAGIC, return -1;);
-
- switch (mode) {
- case SEL_IN:
- if ( skb_queue_len( &self->rx_queue))
- return 1; /* Readable */
- select_wait( &self->read_wait, table);
- break;
- case SEL_OUT:
- if ( self->connected)
- return 1;
- select_wait( &self->write_wait, table);
- break;
- case SEL_EX:
- if ( self->connected)
- return 1;
- select_wait( &self->ex_wait, table);
- break;
- default:
- break;
- }
-
- DEBUG(irlpt_common_debug, __FUNCTION__ " -->\n");
-
- return 0;
-}
-
-#else
-
-/*
- * Function irobex_poll (file, wait)
+ * Function irlpt_poll (file, wait)
*
*
*
*/
-static u_int irlpt_poll(struct file *file, poll_table *wait)
+u_int irlpt_poll(struct file *file, poll_table *wait)
{
DEBUG(irlpt_common_debug, "--> " __FUNCTION__ "\n");
@@ -367,8 +316,6 @@ static u_int irlpt_poll(struct file *file, poll_table *wait)
return 0;
}
-#endif
-
/*
* Function open_irlpt (inode, file)
*
@@ -378,6 +325,7 @@ static u_int irlpt_poll(struct file *file, poll_table *wait)
int irlpt_open(struct inode *inode, struct file *file)
{
struct irlpt_cb *self;
+ struct irlpt_info info;
DEBUG(irlpt_common_debug, "--> " __FUNCTION__ "\n");
@@ -386,22 +334,6 @@ int irlpt_open(struct inode *inode, struct file *file)
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == IRLPT_MAGIC, return -1;);
-#if 0
- if (self->state == IRLPT_IDLE) {
- DEBUG( irlpt_common_debug, __FUNCTION__
- ": state == IRLPT_IDLE! (no device found yet)\n");
- return -ENODEV;
- }
-
- if (self->state == IRLPT_QUERY ||
- self->state == IRLPT_READY ||
- self->state == IRLPT_WAITI) {
- DEBUG( irlpt_common_debug, __FUNCTION__ ": state == IRLPT_QUERY, "
- "IRLPT_READY or IRLPT_WAITI (link problems)!\n");
- return -EBUSY;
- }
-#endif
-
if (self->count++) {
DEBUG( irlpt_common_debug, __FUNCTION__
": count not zero; actual = %d\n", self->count);
@@ -409,6 +341,35 @@ int irlpt_open(struct inode *inode, struct file *file)
return -EBUSY;
}
+ self->eof = FALSE;
+
+ /* ok, now, if it's idle, try to get some information
+ about the remote end, and sleep till we get totally connected.. */
+
+ if ((self->servicetype != IRLPT_SERVER_MODE) &&
+ self->state != IRLPT_CLIENT_CONN) {
+ DEBUG(irlpt_common_debug, __FUNCTION__
+ ": self->state != IRLPT_CLIENT_CONN\n");
+
+ info.daddr = self->daddr;
+ info.saddr = self->saddr;
+
+ if (self->do_event != NULL) {
+ DEBUG(irlpt_common_debug, __FUNCTION__
+ ": doing a discovery..\n");
+ self->do_event( self,
+ IRLPT_DISCOVERY_INDICATION,
+ NULL, &info);
+ DEBUG(irlpt_common_debug, __FUNCTION__
+ ": sleeping until connected.\n");
+ interruptible_sleep_on(&self->read_wait);
+ }
+ }
+
+ /* at this point, if it's a client, we have a connection.
+ * if it's the server, it's waiting for a connection.
+ */
+
DEBUG(irlpt_common_debug, __FUNCTION__ " -->\n");
return 0;
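
With this change, opening the device is what kicks off discovery and the connection attempt: for a client instance, irlpt_open() posts IRLPT_DISCOVERY_INDICATION through self->do_event() and then sleeps on read_wait until the FSM reports a connection (or a disconnect wakes it). From user space the driver is then driven like any character device. A minimal sketch, assuming a device node such as /dev/irlpt0; the actual node name and minor number are whatever is created for this driver and are not defined by this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char msg[] = "Hello from IrLPT\n";
		int fd = open("/dev/irlpt0", O_WRONLY);	/* client: blocks until connected */

		if (fd < 0) {
			perror("open /dev/irlpt0");
			return 1;
		}
		if (write(fd, msg, strlen(msg)) < 0)	/* may sleep if >8 buffers queued */
			perror("write");
		close(fd);	/* waits for queued buffers to drain, then disconnects */
		return 0;
	}
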
@@ -420,9 +381,12 @@ int irlpt_open(struct inode *inode, struct file *file)
*
*
*/
-int irlpt_close(struct inode *inode, struct file *file)
+int irlpt_close(struct inode *inode,
+ struct file *file)
{
struct irlpt_cb *self;
+ struct sk_buff *skb;
+ struct irlpt_info info;
DEBUG(irlpt_common_debug, "--> " __FUNCTION__ "\n");
@@ -433,12 +397,42 @@ int irlpt_close(struct inode *inode, struct file *file)
ASSERT( self != NULL, return -1;);
ASSERT( self->magic == IRLPT_MAGIC, return -1;);
- DEBUG(irlpt_common_debug, __FUNCTION__ ": self->count=%d\n", self->count);
+ DEBUG(irlpt_common_debug,
+ __FUNCTION__ ": self->count=%d\n", self->count);
+
if (self->count > 0)
self->count--;
- DEBUG(irlpt_common_debug, __FUNCTION__ " -->\n");
+ while (self->pkt_count > 0) {
+ interruptible_sleep_on(&self->write_wait);
+ }
+
+ /* all done, tear down the connection and wait for the next open */
+ if ((self->servicetype != IRLPT_SERVER_MODE) &&
+ self->state == IRLPT_CLIENT_CONN) {
+ skb = dev_alloc_skb(64);
+ if (skb == NULL) {
+			DEBUG( 0, __FUNCTION__ ": Could not allocate an "
+ "sk_buff of length %d\n", 64);
+ return 0;
+ }
+
+ skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
+ irlmp_disconnect_request(self->lsap, skb);
+ DEBUG(irlpt_common_debug, __FUNCTION__
+			": irlmp_close_lsap(self->lsap)\n");
+ irlmp_close_lsap(self->lsap);
+ }
+
+ info.daddr = self->daddr;
+ if (self->do_event != NULL) {
+ DEBUG(irlpt_common_debug, __FUNCTION__
+ ": closing connection..\n");
+ self->do_event( self, LMP_DISCONNECT, NULL, &info);
+ }
+
+ DEBUG(irlpt_common_debug, __FUNCTION__ " -->\n");
return 0;
}
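
irlpt_write() now throttles instead of failing: more than eight outstanding buffers puts the writer to sleep on write_wait, and irlpt_close() above sleeps on the same queue until pkt_count reaches zero. The counterpart that decrements pkt_count and wakes the writer once IrLMP has consumed a buffer is not visible in this excerpt, so the second half of the sketch below is an assumption about its shape, not code from the patch.

	/* Producer (irlpt_write): back off while too much is in flight. */
	if (self->pkt_count > 8)
		interruptible_sleep_on(&self->write_wait);
	self->pkt_count++;			/* assumed: counted on submit */
	irlmp_data_request(self->lsap, skb);

	/* Completion side (assumed, not shown in this diff): once a buffer
	 * has gone out, drop the count and wake any sleeping writer. */
	self->pkt_count--;
	wake_up_interruptible(&self->write_wait);
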
@@ -486,6 +480,10 @@ void irlpt_flow_control(struct sk_buff *skb)
#ifdef MODULE
+MODULE_AUTHOR("Thomas Davis <ratbert@radiks.net>");
+MODULE_DESCRIPTION("The Linux IrDA/IrLPT common");
+MODULE_PARM(irlpt_common_debug,"1i");
+
/*
* Function init_module (void)
*
diff --git a/net/irda/irlpt/irlpt_srvr.c b/net/irda/irlpt/irlpt_srvr.c
index b7d6f1bb9..a1362d0dc 100644
--- a/net/irda/irlpt/irlpt_srvr.c
+++ b/net/irda/irlpt/irlpt_srvr.c
@@ -48,24 +48,24 @@ static int irlpt_server_proc_read(char *buf, char **start, off_t offset,
#endif /* CONFIG_PROC_FS */
int irlpt_server_init(void);
-static void irlpt_server_cleanup(void);
-static void irlpt_server_disconnect_indication( void *instance, void *sap,
+static void irlpt_server_disconnect_indication(void *instance, void *sap,
LM_REASON reason,
struct sk_buff *skb);
-static void irlpt_server_connect_confirm( void *instance, void *sap,
- struct qos_info *qos,
- int max_seg_size,
- struct sk_buff *skb);
-static void irlpt_server_connect_indication( void *instance, void *sap,
- struct qos_info *qos,
- int max_seg_size,
- struct sk_buff *skb);
-static void irlpt_server_data_indication( void *instance, void *sap,
- struct sk_buff *skb);
+static void irlpt_server_connect_confirm(void *instance, void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size,
+ struct sk_buff *skb);
+static void irlpt_server_connect_indication(void *instance,
+ void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size,
+ struct sk_buff *skb);
+static int irlpt_server_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
static void register_irlpt_server(void);
static void deregister_irlpt_server(void);
-static struct wait_queue *irlpt_server_wait;
+static __u32 skey; /* IrLMP service handle */
int irlpt_server_lsap = LSAP_IRLPT;
int irlpt_server_debug = 4;
@@ -73,7 +73,8 @@ int irlpt_server_debug = 4;
#if 0
static char *rcsid = "$Id: irlpt_server.c,v 1.9 1998/10/22 12:02:22 dagb Exp $";
#endif
-static char *version = "IrLPT server, $Revision: 1.9 $/$Date: 1998/10/22 12:02:22 $ (Thomas Davis)";
+
+static char *version = "IrLPT server, v2 (Thomas Davis)";
struct file_operations irlpt_fops = {
irlpt_seek, /* seek */
@@ -96,7 +97,7 @@ struct file_operations irlpt_fops = {
#ifdef CONFIG_PROC_FS
/*
- * Function proc_irlpt_read (buf, start, offset, len, unused)
+ * Function irlpt_server_proc_read (buf, start, offset, len, unused)
*
*
*
@@ -137,21 +138,19 @@ static int irlpt_server_proc_read(char *buf, char **start, off_t offset,
break;
}
- len += sprintf(buf+len, "servicetype: %s\n",
- irlpt_service_type[index]);
- len += sprintf(buf+len, "porttype: %s\n",
+ len += sprintf(buf+len, "servicetype: %s, porttype: %s\n",
+ irlpt_service_type[index],
irlpt_port_type[irlpt_server->porttype]);
- len += sprintf(buf+len, "daddr: %d\n",
- irlpt_server->daddr);
- len += sprintf(buf+len, "state: %s\n",
- irlpt_server_fsm_state[irlpt_server->state]);
- len += sprintf(buf+len, "retries: %d\n",
- irlpt_server->open_retries);
- len += sprintf(buf+len, "peersap: %d\n",
+ len += sprintf(buf+len, "saddr: 0x%08x, daddr: 0x%08x\n",
+ irlpt_server->saddr, irlpt_server->daddr);
+ len += sprintf(buf+len, "slsap: 0x%08x, dlsap: 0x%08x\n",
+ irlpt_server->slsap_sel,
irlpt_server->dlsap_sel);
- len += sprintf(buf+len, "count: %d\n",
+ len += sprintf(buf+len, "retries: %d, count: %d\n",
+ irlpt_server->open_retries,
irlpt_server->count);
- len += sprintf(buf+len, "rx_queue: %d\n",
+ len += sprintf(buf+len, "fsm state: %s, rx queue depth: %d\n",
+ irlpt_server_fsm_state[irlpt_server->state],
skb_queue_len(&irlpt_server->rx_queue));
len += sprintf(buf+len, "\n");
}
@@ -161,14 +160,7 @@ static int irlpt_server_proc_read(char *buf, char **start, off_t offset,
return len;
}
-extern struct proc_dir_entry proc_irda;
-
-struct proc_dir_entry proc_irlpt_server = {
- 0, 12, "irlpt_server",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &irlpt_server_proc_read /* get_info */,
-};
+extern struct proc_dir_entry *proc_irda;
#endif /* CONFIG_PROC_FS */
@@ -182,6 +174,8 @@ struct proc_dir_entry proc_irlpt_server = {
/*int irlpt_init( struct device *dev) {*/
__initfunc(int irlpt_server_init(void))
{
+ __u16 hints;
+
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
printk( KERN_INFO "%s\n", version);
@@ -194,7 +188,6 @@ __initfunc(int irlpt_server_init(void))
" irlpt_server control block!\n");
return -ENOMEM;
}
-
memset( irlpt_server, 0, sizeof(struct irlpt_cb));
sprintf(irlpt_server->ifname, "irlpt_server");
@@ -209,12 +202,14 @@ __initfunc(int irlpt_server_init(void))
skb_queue_head_init(&irlpt_server->rx_queue);
- irlmp_register_layer( S_PRINTER, SERVER, FALSE, NULL);
+ hints = irlmp_service_to_hint(S_PRINTER);
+ skey = irlmp_register_service(hints);
register_irlpt_server();
#ifdef CONFIG_PROC_FS
- proc_register( &proc_irda, &proc_irlpt_server);
+ create_proc_entry("irlpt_server", 0, proc_irda)->get_info
+ = irlpt_server_proc_read;
#endif /* CONFIG_PROC_FS */
DEBUG( irlpt_server_debug, __FUNCTION__ " -->\n");
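
The old irlmp_register_layer() call gives way to the hint-based service API: translate the service into a hint bit mask, register it, and keep the returned handle so cleanup can unregister it again. In isolation the pairing looks like this; the calls and the skey handle are exactly as used in this diff, only the wrapper function names are invented for illustration.

	static __u32 skey;	/* IrLMP service handle, kept for cleanup */

	static void irlpt_server_register_hints(void)
	{
		__u16 hints;

		hints = irlmp_service_to_hint(S_PRINTER);
		skey  = irlmp_register_service(hints);
	}

	static void irlpt_server_unregister_hints(void)
	{
		irlmp_unregister_service(skey);
	}
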
@@ -234,12 +229,11 @@ static void irlpt_server_cleanup(void)
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
+ irlmp_unregister_service(skey);
deregister_irlpt_server();
while (( skb = skb_dequeue(&irlpt_server->rx_queue)) != NULL) {
DEBUG(irlpt_server_debug, __FUNCTION__ ": freeing SKB\n");
- IS_SKB( skb, return;);
- FREE_SKB_MAGIC( skb);
dev_kfree_skb( skb);
}
@@ -248,7 +242,7 @@ static void irlpt_server_cleanup(void)
kfree(irlpt_server);
#ifdef CONFIG_PROC_FS
- proc_unregister( &proc_irda, proc_irlpt_server.low_ino);
+ remove_proc_entry("irlpt_server", proc_irda);
#endif
DEBUG( irlpt_server_debug, __FUNCTION__ " -->\n");
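
Likewise the static proc_dir_entry plus proc_register()/proc_unregister() pair is replaced by create_proc_entry()/remove_proc_entry() against the now-dynamic proc_irda directory. The patch chains the get_info assignment straight onto the create call; a slightly more defensive spelling of the same idea (the NULL check is an addition here, not something the patch does) would be:

	#ifdef CONFIG_PROC_FS
		struct proc_dir_entry *ent;

		ent = create_proc_entry("irlpt_server", 0, proc_irda);
		if (ent)
			ent->get_info = irlpt_server_proc_read;
	#endif /* CONFIG_PROC_FS */

	/* ...and the matching removal in the cleanup path: */
	#ifdef CONFIG_PROC_FS
		remove_proc_entry("irlpt_server", proc_irda);
	#endif /* CONFIG_PROC_FS */
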
@@ -258,11 +252,14 @@ static void irlpt_server_cleanup(void)
* Function irlpt_disconnect_indication (handle)
*
*/
-static void irlpt_server_disconnect_indication( void *instance, void *sap,
+static void irlpt_server_disconnect_indication( void *instance,
+ void *sap,
LM_REASON reason,
struct sk_buff *userdata)
{
+#if 0
struct irlpt_info info;
+#endif
struct irlpt_cb *self;
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
@@ -272,17 +269,22 @@ static void irlpt_server_disconnect_indication( void *instance, void *sap,
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
+#if 0
info.daddr = self->daddr;
+#endif
- DEBUG( irlpt_server_debug, __FUNCTION__ ": reason=%d (%s), dlsap_sel=%d\n",
+ DEBUG( irlpt_server_debug,
+ __FUNCTION__ ": reason=%d (%s), dlsap_sel=%d\n",
reason, irlpt_reasons[reason], self->dlsap_sel);
self->connected = IRLPT_DISCONNECTED;
self->eof = reason;
- wake_up_interruptible(&irlpt_server_wait);
+ wake_up_interruptible(&self->read_wait);
+ wake_up_interruptible(&self->write_wait);
+ wake_up_interruptible(&self->ex_wait);
- DEBUG( irlpt_server_debug, __FUNCTION__ ": skb_queue_len=%d\n",
+ DEBUG( irlpt_server_debug, __FUNCTION__ ": rx queue length: %d\n",
skb_queue_len(&irlpt_server->rx_queue));
irlpt_server_do_event( self, LMP_DISCONNECT, NULL, NULL);
@@ -298,12 +300,12 @@ static void irlpt_server_disconnect_indication( void *instance, void *sap,
*
* LSAP connection confirmed!
*/
-static void irlpt_server_connect_confirm( void *instance, void *sap,
- struct qos_info *qos,
- int max_seg_size,
- struct sk_buff *skb)
+static void irlpt_server_connect_confirm(void *instance,
+ void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size,
+ struct sk_buff *skb)
{
- struct irlpt_info info;
struct irlpt_cb *self;
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
@@ -312,21 +314,6 @@ static void irlpt_server_connect_confirm( void *instance, void *sap,
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
- info.daddr = self->daddr;
-
- /*
- * Check if we have got some QoS parameters back! This should be the
- * negotiated QoS for the link.
- */
- if ( qos) {
- DEBUG( irlpt_server_debug, __FUNCTION__
- ": IrLPT Negotiated BAUD_RATE: %02x\n",
- qos->baud_rate.bits);
- DEBUG( irlpt_server_debug, __FUNCTION__
- ": IrLPT Negotiated BAUD_RATE: %d bps.\n",
- qos->baud_rate.value);
- }
-
self->connected = TRUE;
irlpt_server_do_event( self, LMP_CONNECT, NULL, NULL);
@@ -338,19 +325,20 @@ static void irlpt_server_connect_confirm( void *instance, void *sap,
* Function irlpt_connect_indication (handle)
*
*/
-static void irlpt_server_connect_indication( void *instance, void *sap,
- struct qos_info *qos,
- int max_seg_size,
- struct sk_buff *skb)
+static void irlpt_server_connect_indication(void *instance,
+ void *sap,
+ struct qos_info *qos,
+ __u32 max_seg_size,
+ struct sk_buff *skb)
{
struct irlpt_cb *self;
struct irlpt_info info;
- struct lsap_cb *lsap;
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
self = ( struct irlpt_cb *) instance;
- lsap = (struct lsap_cb *) sap;
+
+ info.lsap = (struct lsap_cb *) sap;
ASSERT( self != NULL, return;);
ASSERT( self->magic == IRLPT_MAGIC, return;);
@@ -358,8 +346,6 @@ static void irlpt_server_connect_indication( void *instance, void *sap,
self->connected = IRLPT_CONNECTED;
self->eof = FALSE;
- info.lsap = lsap;
-
irlpt_server_do_event( self, LMP_CONNECT, NULL, &info);
if (skb) {
@@ -370,25 +356,24 @@ static void irlpt_server_connect_indication( void *instance, void *sap,
}
/*
- * Function irlpt_data_indication (handle, skb)
+ * Function irlpt_server_data_indication (handle, skb)
*
* This function gets the data that is received on the data channel
*
*/
-static void irlpt_server_data_indication( void *instance, void *sap,
- struct sk_buff *skb)
+static int irlpt_server_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
{
-
struct irlpt_cb *self;
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
self = ( struct irlpt_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IRLPT_MAGIC, return;);
+ ASSERT( self != NULL, return -1;);
+ ASSERT( self->magic == IRLPT_MAGIC, return -1;);
- ASSERT( skb != NULL, return;);
+ ASSERT( skb != NULL, return -1;);
DEBUG( irlpt_server_debug, __FUNCTION__ ": len=%d\n", (int) skb->len);
@@ -397,9 +382,11 @@ static void irlpt_server_data_indication( void *instance, void *sap,
#endif
skb_queue_tail(&self->rx_queue, skb);
- wake_up_interruptible(&irlpt_server_wait);
+ wake_up_interruptible(&self->read_wait);
DEBUG( irlpt_server_debug, __FUNCTION__ " -->\n");
+
+ return 0;
}
/*
@@ -419,9 +406,8 @@ static void register_irlpt_server(void)
/*
* First register control TSAP
*/
-
if ( !irlpt_server || irlpt_server->magic != IRLPT_MAGIC) {
- DEBUG( 0, "irlpt_register_server:, unable to obtain handle!\n");
+ DEBUG( 0, __FUNCTION__ ": unable to obtain handle!\n");
return;
}
@@ -432,12 +418,13 @@ static void register_irlpt_server(void)
notify.disconnect_indication = irlpt_server_disconnect_indication;
notify.data_indication = irlpt_server_data_indication;
notify.instance = irlpt_server;
- strcpy(notify.name, "IrLPT");
+ strcpy(notify.name, "IrLPT server");
irlpt_server->lsap = irlmp_open_lsap( irlpt_server_lsap, &notify);
irlpt_server->connected = IRLPT_WAITING;
irlpt_server->service_LSAP = irlpt_server_lsap;
+ irlpt_server->slsap_sel = irlpt_server_lsap;
/*
* Register with LM-IAS
@@ -461,35 +448,9 @@ static void register_irlpt_server(void)
*/
static void deregister_irlpt_server(void)
{
-#if 0
- struct notify_t notify;
-#endif
DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
-#if 0
- /*
- * First register control TSAP
- */
-
- if ( !irlpt_server || irlpt_server->magic != IRLPT_MAGIC) {
- DEBUG( 0, "irlpt_register_server:, unable to obtain handle!\n");
- return;
- }
-
- irda_notify_init(&notify);
-
- notify.connect_confirm = irlpt_server_connect_confirm;
- notify.connect_indication = irlpt_server_connect_indication;
- notify.disconnect_indication = irlpt_server_disconnect_indication;
- notify.data_indication = irlpt_server_data_indication;
- notify.instance = irlpt_server;
- strcpy(notify.name, "IrLPT");
-
- irlpt_server->lsap = irlmp_open_lsap( irlpt_server_lsap, &notify);
-
- irlpt_server->connected = IRLPT_WAITING;
- irlpt_server->service_LSAP = irlpt_server_lsap;
-#endif
+ irlpt_server->connected = IRLPT_DISCONNECTED;
/*
* de-Register with LM-IAS
@@ -504,7 +465,9 @@ static void deregister_irlpt_server(void)
#ifdef MODULE
MODULE_AUTHOR("Thomas Davis <ratbert@radiks.net>");
-MODULE_DESCRIPTION("The Linux IrDA/IrLPT protocol");
+MODULE_DESCRIPTION("The Linux IrDA/IrLPT server protocol");
+MODULE_PARM(irlpt_server_debug,"1i");
+MODULE_PARM(irlpt_server_fsm_debug, "1i");
/*
* Function init_module (void)
@@ -514,14 +477,15 @@ MODULE_DESCRIPTION("The Linux IrDA/IrLPT protocol");
*/
int init_module(void)
{
+ int ret;
- DEBUG( irlpt_server_debug, "--> irlpt server: init_module\n");
+ DEBUG( irlpt_server_debug, "--> IrLPT server: init_module\n");
- irlpt_server_init();
+ ret = irlpt_server_init();
- DEBUG( irlpt_server_debug, "irlpt server: init_module -->\n");
+ DEBUG( irlpt_server_debug, "IrLPT server: init_module -->\n");
- return 0;
+ return ret;
}
/*
@@ -532,14 +496,14 @@ int init_module(void)
*/
void cleanup_module(void)
{
- DEBUG( irlpt_server_debug, "--> " __FUNCTION__ "\n");
- DEBUG( 3, "--> irlpt server: cleanup_module\n");
+ DEBUG( irlpt_server_debug, "--> IrLPT server: cleanup_module\n");
+
/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
/* Free some memory */
irlpt_server_cleanup();
- DEBUG( irlpt_server_debug, "irlpt server: cleanup_module -->\n");
+ DEBUG( irlpt_server_debug, "IrLPT server: cleanup_module -->\n");
}
#endif /* MODULE */
diff --git a/net/irda/irlpt/irlpt_srvr_fsm.c b/net/irda/irlpt/irlpt_srvr_fsm.c
index 3640a9677..d37d98b7c 100644
--- a/net/irda/irlpt/irlpt_srvr_fsm.c
+++ b/net/irda/irlpt/irlpt_srvr_fsm.c
@@ -4,8 +4,10 @@
* Version: 0.1
* Sources: irlan_event.c
*
- * Copyright (c) 1997, Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
- * Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>, All Rights Reserved.
+ * Copyright (c) 1997, Dag Brattli <dagb@cs.uit.no>,
+ * All Rights Reserved.
+ * Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>,
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -38,9 +40,10 @@ static int irlpt_server_state_conn ( struct irlpt_cb *self,
static char *rcsid = "$Id: irlpt_server_fsm.c,v 1.4 1998/10/05 05:46:45 ratbert Exp $";
#endif
-int irlpt_server_fsm_debug = 3;
+int irlpt_server_fsm_debug = 4; /* don't change this! */
-static int (*irlpt_server_state[])( struct irlpt_cb *self, IRLPT_EVENT event,
+static int (*irlpt_server_state[])( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info) =
{
@@ -48,10 +51,13 @@ static int (*irlpt_server_state[])( struct irlpt_cb *self, IRLPT_EVENT event,
irlpt_server_state_conn,
};
-void irlpt_server_do_event( struct irlpt_cb *self, IRLPT_EVENT event,
- struct sk_buff *skb, struct irlpt_info *info)
+void irlpt_server_do_event( struct irlpt_cb *self,
+ IRLPT_EVENT event,
+ struct sk_buff *skb,
+ struct irlpt_info *info)
{
- DEBUG( irlpt_server_fsm_debug, __FUNCTION__ ": STATE = %s, EVENT = %s\n",
+ DEBUG( irlpt_server_fsm_debug,
+ __FUNCTION__ ": STATE = %s, EVENT = %s\n",
irlpt_server_fsm_state[self->state], irlpt_fsm_event[event]);
ASSERT( self != NULL, return;);
@@ -62,7 +68,8 @@ void irlpt_server_do_event( struct irlpt_cb *self, IRLPT_EVENT event,
DEBUG( irlpt_server_fsm_debug, __FUNCTION__ " -->\n");
}
-void irlpt_server_next_state( struct irlpt_cb *self, IRLPT_CLIENT_STATE state)
+void irlpt_server_next_state( struct irlpt_cb *self,
+ IRLPT_CLIENT_STATE state)
{
DEBUG( irlpt_server_fsm_debug, __FUNCTION__ ": NEXT STATE = %s\n",
@@ -82,7 +89,8 @@ void irlpt_server_next_state( struct irlpt_cb *self, IRLPT_CLIENT_STATE state)
* IDLE, We are waiting for an indication that there is a provider
* available.
*/
-static int irlpt_server_state_idle( struct irlpt_cb *self, IRLPT_EVENT event,
+static int irlpt_server_state_idle( struct irlpt_cb *self,
+ IRLPT_EVENT event,
struct sk_buff *skb,
struct irlpt_info *info)
{
@@ -96,10 +104,12 @@ static int irlpt_server_state_idle( struct irlpt_cb *self, IRLPT_EVENT event,
switch( event) {
case LMP_CONNECT:
DEBUG( irlpt_server_fsm_debug, __FUNCTION__
- ": LM_CONNECT, remote lsap=%d\n",
- info->dlsap_sel);
+ ": LM_CONNECT, remote lsap: 0x%08x\n",
+ info->lsap->dlsap_sel);
- self->dlsap_sel = info->dlsap_sel;
+ self->dlsap_sel = info->lsap->dlsap_sel;
+ self->daddr = info->daddr;
+ self->saddr = info->saddr;
r_skb = dev_alloc_skb(64);
if (r_skb == NULL) {
@@ -107,12 +117,9 @@ static int irlpt_server_state_idle( struct irlpt_cb *self, IRLPT_EVENT event,
": can't allocate sk_buff of length 64\n");
return 0;
}
- ALLOC_SKB_MAGIC(r_skb);
skb_reserve( r_skb, LMP_MAX_HEADER);
- skb->len = 0;
irlmp_connect_response( self->lsap, r_skb);
irlpt_server_next_state( self, IRLPT_SERVER_CONN);
-
break;
default:
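
Both this server response and the client paths above share one allocation pattern: take a small sk_buff, reserve enough headroom for the headers that IrLMP (and IrLAP below it) will prepend, then hand the buffer down. Stripped of the surrounding state machine, and using only calls that appear in this diff:

	struct sk_buff *r_skb;

	r_skb = dev_alloc_skb(64);
	if (r_skb == NULL)
		return 0;	/* out of memory; nothing useful to do */

	/* Leave room for the LMP/LAP headers the lower layers will push. */
	skb_reserve(r_skb, LMP_MAX_HEADER);

	irlmp_connect_response(self->lsap, r_skb);
	irlpt_server_next_state(self, IRLPT_SERVER_CONN);
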
diff --git a/net/irda/irmod.c b/net/irda/irmod.c
index c009d6858..88d61c2cd 100644
--- a/net/irda/irmod.c
+++ b/net/irda/irmod.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Dec 15 13:55:39 1997
- * Modified at: Tue Jan 19 23:34:18 1999
+ * Modified at: Mon Apr 12 11:31:01 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997 Dag Brattli, All Rights Reserved.
@@ -24,31 +24,44 @@
#include <linux/config.h>
#include <linux/module.h>
+
#include <linux/init.h>
-#include <asm/segment.h>
#include <linux/poll.h>
+#include <asm/segment.h>
+
+#include <linux/proc_fs.h>
#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
+#include <net/irda/irlap.h>
+#ifdef CONFIG_IRDA_COMPRESSION
+#include <net/irda/irlap_comp.h>
+#endif /* CONFIG_IRDA_COMPRESSION */
#include <net/irda/irlmp.h>
#include <net/irda/iriap.h>
+#include <net/irda/irias_object.h>
#include <net/irda/irttp.h>
+#include <net/irda/irda_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/timer.h>
+
+extern struct proc_dir_entry proc_irda;
struct irda_cb irda; /* One global instance */
#ifdef CONFIG_IRDA_DEBUG
-__u32 irda_debug = IRDA_DEBUG;
+__u32 irda_debug = IRDA_DEBUG_LEVEL;
#endif
extern void irda_proc_register(void);
extern void irda_proc_unregister(void);
-extern int irda_sysctl_register(void);
+extern int irda_sysctl_register(void);
extern void irda_sysctl_unregister(void);
extern void irda_proto_init(struct net_proto *pro);
extern void irda_proto_cleanup(void);
extern int irda_device_init(void);
-extern int irobex_init(void);
extern int irlan_init(void);
extern int irlan_client_init(void);
extern int irlan_server_init(void);
@@ -57,15 +70,21 @@ extern int irvtd_init(void);
extern int irlpt_client_init(void);
extern int irlpt_server_init(void);
-static int irda_open( struct inode * inode, struct file *file);
-static int irda_ioctl( struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-static int irda_close( struct inode *inode, struct file *file);
-static ssize_t irda_read( struct file *file, char *buffer, size_t count,
- loff_t *noidea);
-static ssize_t irda_write( struct file *file, const char *buffer,
- size_t count, loff_t *noidea);
-static u_int irda_poll( struct file *file, poll_table *wait);
+#ifdef CONFIG_IRDA_COMPRESSION
+#ifdef CONFIG_IRDA_DEFLATE
+extern irda_deflate_init();
+#endif /* CONFIG_IRDA_DEFLATE */
+#endif /* CONFIG_IRDA_COMPRESSION */
+
+static int irda_open(struct inode * inode, struct file *file);
+static int irda_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+static int irda_close(struct inode *inode, struct file *file);
+static ssize_t irda_read(struct file *file, char *buffer, size_t count,
+ loff_t *noidea);
+static ssize_t irda_write(struct file *file, const char *buffer,
+ size_t count, loff_t *noidea);
+static u_int irda_poll(struct file *file, poll_table *wait);
static struct file_operations irda_fops = {
NULL, /* seek */
@@ -82,13 +101,99 @@ static struct file_operations irda_fops = {
NULL, /* fasync */
};
+/* IrTTP */
+EXPORT_SYMBOL(irttp_open_tsap);
+EXPORT_SYMBOL(irttp_close_tsap);
+EXPORT_SYMBOL(irttp_connect_response);
+EXPORT_SYMBOL(irttp_data_request);
+EXPORT_SYMBOL(irttp_disconnect_request);
+EXPORT_SYMBOL(irttp_flow_request);
+EXPORT_SYMBOL(irttp_connect_request);
+EXPORT_SYMBOL(irttp_udata_request);
+
+/* Main IrDA module */
+#ifdef CONFIG_IRDA_DEBUG
+EXPORT_SYMBOL(irda_debug);
+#endif
+EXPORT_SYMBOL(irda_notify_init);
+EXPORT_SYMBOL(irmanager_notify);
+EXPORT_SYMBOL(irda_lock);
+EXPORT_SYMBOL(proc_irda);
+
+/* IrIAP/IrIAS */
+EXPORT_SYMBOL(iriap_getvaluebyclass_request);
+EXPORT_SYMBOL(irias_object_change_attribute);
+EXPORT_SYMBOL(irias_add_integer_attrib);
+EXPORT_SYMBOL(irias_add_octseq_attrib);
+EXPORT_SYMBOL(irias_add_string_attrib);
+EXPORT_SYMBOL(irias_insert_object);
+EXPORT_SYMBOL(irias_new_object);
+EXPORT_SYMBOL(irias_delete_object);
+EXPORT_SYMBOL(irias_find_object);
+EXPORT_SYMBOL(irias_find_attrib);
+EXPORT_SYMBOL(irias_new_integer_value);
+EXPORT_SYMBOL(irias_new_string_value);
+EXPORT_SYMBOL(irias_new_octseq_value);
+
+/* IrLMP */
+EXPORT_SYMBOL(irlmp_discovery_request);
+EXPORT_SYMBOL(irlmp_register_client);
+EXPORT_SYMBOL(irlmp_unregister_client);
+EXPORT_SYMBOL(irlmp_update_client);
+EXPORT_SYMBOL(irlmp_register_service);
+EXPORT_SYMBOL(irlmp_unregister_service);
+EXPORT_SYMBOL(irlmp_service_to_hint);
+EXPORT_SYMBOL(irlmp_data_request);
+EXPORT_SYMBOL(irlmp_open_lsap);
+EXPORT_SYMBOL(irlmp_close_lsap);
+EXPORT_SYMBOL(irlmp_connect_request);
+EXPORT_SYMBOL(irlmp_connect_response);
+EXPORT_SYMBOL(irlmp_disconnect_request);
+EXPORT_SYMBOL(irlmp_get_daddr);
+EXPORT_SYMBOL(irlmp_get_saddr);
+EXPORT_SYMBOL(lmp_reasons);
+
+/* Queue */
+EXPORT_SYMBOL(hashbin_find);
+EXPORT_SYMBOL(hashbin_new);
+EXPORT_SYMBOL(hashbin_insert);
+EXPORT_SYMBOL(hashbin_delete);
+EXPORT_SYMBOL(hashbin_remove);
+EXPORT_SYMBOL(hashbin_get_next);
+EXPORT_SYMBOL(hashbin_get_first);
+
+/* IrLAP */
+#ifdef CONFIG_IRDA_COMPRESSION
+EXPORT_SYMBOL(irda_unregister_compressor);
+EXPORT_SYMBOL(irda_register_compressor);
+#endif /* CONFIG_IRDA_COMPRESSION */
+EXPORT_SYMBOL(irda_init_max_qos_capabilies);
+EXPORT_SYMBOL(irda_qos_bits_to_value);
+EXPORT_SYMBOL(irda_device_open);
+EXPORT_SYMBOL(irda_device_close);
+EXPORT_SYMBOL(irda_device_setup);
+EXPORT_SYMBOL(irda_device_set_media_busy);
+EXPORT_SYMBOL(irda_device_txqueue_empty);
+EXPORT_SYMBOL(async_wrap_skb);
+EXPORT_SYMBOL(async_unwrap_char);
+EXPORT_SYMBOL(irda_start_timer);
+EXPORT_SYMBOL(irda_get_mtt);
+EXPORT_SYMBOL(setup_dma);
+
+#ifdef CONFIG_IRTTY
+EXPORT_SYMBOL(irtty_set_dtr_rts);
+EXPORT_SYMBOL(irtty_register_dongle);
+EXPORT_SYMBOL(irtty_unregister_dongle);
+#endif
+
__initfunc(int irda_init(void))
{
- printk( KERN_INFO "Linux Support for the IrDA (tm) protocols (Dag Brattli)\n");
+ printk(KERN_INFO "IrDA (tm) Protocols for Linux-2.2 (Dag Brattli)\n");
- irda_device_init();
- irlap_init();
irlmp_init();
+ irlap_init();
+ irda_device_init();
+
iriap_init();
irttp_init();
@@ -113,15 +218,6 @@ __initfunc(int irda_init(void))
#ifdef CONFIG_IRLAN
irlan_init();
#endif
-#ifdef CONFIG_IRLAN_CLIENT
- irlan_client_init();
-#endif
-#ifdef CONFIG_IRLAN_SERVER
- irlan_server_init();
-#endif
-#ifdef CONFIG_IROBEX
- irobex_init();
-#endif
#ifdef CONFIG_IRCOMM
ircomm_init();
irvtd_init();
@@ -135,9 +231,16 @@ __initfunc(int irda_init(void))
irlpt_server_init();
#endif
+#ifdef CONFIG_IRDA_COMPRESSION
+#ifdef CONFIG_IRDA_DEFLATE
+ irda_deflate_init();
+#endif /* CONFIG_IRDA_DEFLATE */
+#endif /* CONFIG_IRDA_COMPRESSION */
+
return 0;
}
+#ifdef MODULE
void irda_cleanup(void)
{
misc_deregister( &irda.dev);
@@ -161,20 +264,7 @@ void irda_cleanup(void)
/* Remove middle layer */
irlmp_cleanup();
}
-
-/*
- * Function irda_lock (lock)
- *
- * Lock variable. Returns false if the lock is already set.
- *
- */
-inline int irda_lock( int *lock) {
- if ( test_and_set_bit( 0, (void *) lock)) {
- printk("Trying to lock, already locked variable!\n");
- return FALSE;
- }
- return TRUE;
-}
+#endif /* MODULE */
/*
* Function irda_unlock (lock)
@@ -182,8 +272,9 @@ inline int irda_lock( int *lock) {
* Unlock variable. Returns false if lock is already unlocked
*
*/
-inline int irda_unlock( int *lock) {
- if ( !test_and_clear_bit( 0, (void *) lock)) {
+inline int irda_unlock(int *lock)
+{
+ if (!test_and_clear_bit(0, (void *) lock)) {
printk("Trying to unlock already unlocked variable!\n");
return FALSE;
}
@@ -198,7 +289,6 @@ inline int irda_unlock( int *lock) {
*/
void irda_notify_init( struct notify_t *notify)
{
-
notify->data_indication = NULL;
notify->udata_indication = NULL;
notify->connect_confirm = NULL;
@@ -401,10 +491,34 @@ static u_int irda_poll( struct file *file, poll_table *wait)
return 0;
}
+void irda_mod_inc_use_count(void)
+{
#ifdef MODULE
+ MOD_INC_USE_COUNT;
+#endif
+}
+
+void irda_mod_dec_use_count(void)
+{
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+}
+
+#ifdef MODULE
+#ifdef CONFIG_PROC_FS
+void irda_proc_modcount(struct inode *inode, int fill)
+{
+ if (fill)
+ MOD_INC_USE_COUNT;
+ else
+ MOD_DEC_USE_COUNT;
+}
+#endif /* CONFIG_PROC_FS */
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("The Linux IrDA protocol subsystem");
+MODULE_PARM(irda_debug, "1l");
/*
* Function init_module (void)
@@ -429,5 +543,4 @@ void cleanup_module(void)
{
irda_proto_cleanup();
}
-
-#endif
+#endif /* MODULE */
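
irmod.c also grows small wrappers (irda_mod_inc_use_count(), irda_mod_dec_use_count() and, for /proc entries, irda_proc_modcount()) so callers can bump the module reference count unconditionally while the MOD_*_USE_COUNT macros only take effect when IrDA is built as a module. A protocol layer's open/close pair would then use them roughly as below; the function names here are hypothetical, purely for illustration.

	static int example_open(struct inode *inode, struct file *file)
	{
		irda_mod_inc_use_count();	/* pin the IrDA module while open */
		return 0;
	}

	static int example_close(struct inode *inode, struct file *file)
	{
		irda_mod_dec_use_count();
		return 0;
	}
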
diff --git a/net/irda/irobex/Config.in b/net/irda/irobex/Config.in
deleted file mode 100644
index 7e6f71b45..000000000
--- a/net/irda/irobex/Config.in
+++ /dev/null
@@ -1,3 +0,0 @@
-
-dep_tristate 'IrOBEX protocol' CONFIG_IROBEX $CONFIG_IRDA
-
diff --git a/net/irda/irobex/Makefile b/net/irda/irobex/Makefile
deleted file mode 100644
index 1b028937b..000000000
--- a/net/irda/irobex/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Makefile for the Linux IrDA IrOBEX protocol layer.
-#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definition is now in the main makefile...
-
-#O_TARGET :=
-O_OBJS := irobex.o
-M_OBJS := irobex.o
-
-OX_OBJS +=
-
-include $(TOPDIR)/Rules.make
-
-tar:
- tar -cvf /dev/f1 .
diff --git a/net/irda/irobex/irobex.c b/net/irda/irobex/irobex.c
deleted file mode 100644
index 8b289182d..000000000
--- a/net/irda/irobex/irobex.c
+++ /dev/null
@@ -1,1119 +0,0 @@
-/*********************************************************************
- *
- * Filename: irobex.c
- * Version: 0.3
- * Description: Kernel side of the IrOBEX layer
- * Status: Experimental.
- * Author: Dag Brattli <dagb@cs.uit.no>
- * Created at: Thu Jun 25 21:21:07 1998
- * Modified at: Sat Jan 16 22:18:03 1999
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1998 Dag Brattli, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * Neither Dag Brattli nor University of Tromsø admit liability nor
- * provide warranty for any of this software. This material is
- * provided "AS-IS" and at no charge.
- *
- ********************************************************************/
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/ioctl.h>
-#include <linux/init.h>
-
-#include <asm/byteorder.h>
-#include <asm/segment.h>
-#include <asm/uaccess.h>
-#include <linux/poll.h>
-
-#include <net/irda/irttp.h>
-#include <net/irda/irias_object.h>
-#include <net/irda/iriap.h>
-
-#include <net/irda/irobex.h>
-
-/*
- * Master structure, only one instance for now!!
- */
-struct irobex_cb *irobex;
-
-char *irobex_state[] = {
- "OBEX_IDLE",
- "OBEX_DISCOVER",
- "OBEX_QUERY",
- "OBEX_CONN",
- "OBEX_DATA",
-};
-
-static int irobex_dev_open( struct inode * inode, struct file *file);
-static int irobex_ioctl( struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-static int irobex_dev_close( struct inode *inode, struct file *file);
-static ssize_t irobex_read( struct file *file, char *buffer, size_t count,
- loff_t *noidea);
-static ssize_t irobex_write( struct file *file, const char *buffer,
- size_t count, loff_t *noidea);
-static loff_t irobex_seek( struct file *, loff_t, int);
-static u_int irobex_poll( struct file *file, poll_table *wait);
-static int irobex_fasync( int, struct file *, int);
-
-static struct file_operations irobex_fops = {
- irobex_seek, /* seek */
- irobex_read,
- irobex_write,
- NULL, /* readdir */
- irobex_poll, /* poll */
- irobex_ioctl, /* ioctl */
- NULL, /* mmap */
- irobex_dev_open,
- NULL,
- irobex_dev_close,
- NULL,
- irobex_fasync,
-};
-
-#ifdef CONFIG_PROC_FS
-static int irobex_proc_read( char *buf, char **start, off_t offset,
- int len, int unused);
-
-extern struct proc_dir_entry proc_irda;
-
-struct proc_dir_entry proc_irobex = {
- 0, 6, "irobex",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL,
- &irobex_proc_read,
-};
-#endif
-
-/*
- * Function irobex_init (dev)
- *
- * Initializes the irobex control structure, and registers as a misc
- * device
- *
- */
-__initfunc(int irobex_init(void))
-{
- struct irobex_cb *self;
-
- self = kmalloc(sizeof(struct irobex_cb), GFP_ATOMIC);
- if ( self == NULL)
- return -ENOMEM;
-
- memset( self, 0, sizeof(struct irobex_cb));
- sprintf( self->devname, "irobex%d", 0); /* Just one instance for now */
-
- self->magic = IROBEX_MAGIC;
- self->rx_flow = self->tx_flow = FLOW_START;
-
- self->dev.minor = MISC_DYNAMIC_MINOR;
- self->dev.name = "irobex";
- self->dev.fops = &irobex_fops;
-
- skb_queue_head_init( &self->rx_queue);
- init_timer( &self->watchdog_timer);
-
- irobex = self;
-
- misc_register( &self->dev);
-
-#ifdef CONFIG_PROC_FS
- proc_register( &proc_irda, &proc_irobex);
-#endif /* CONFIG_PROC_FS */
-
- irlmp_register_layer( S_OBEX, CLIENT | SERVER, TRUE,
- irobex_discovery_indication);
-
- return 0;
-}
-
-/*
- * Function irobex_cleanup (void)
- *
- * Removes the IrOBEX layer
- *
- */
-#ifdef MODULE
-void irobex_cleanup(void)
-{
- struct sk_buff *skb;
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = irobex;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- /*
- * Deregister client and server
- */
- irlmp_unregister_layer( S_OBEX, CLIENT | SERVER);
-
- if ( self->tsap) {
- irttp_close_tsap( self->tsap);
- self->tsap = NULL;
- }
-
- /* Stop timers */
- del_timer( &self->watchdog_timer);
-
- /*
- * Deallocate buffers
- */
- while (( skb = skb_dequeue( &self->rx_queue)) != NULL)
- dev_kfree_skb( skb);
-
-#ifdef CONFIG_PROC_FS
- proc_unregister( &proc_irda, proc_irobex.low_ino);
-#endif
-
- misc_deregister( &self->dev);
-
- kfree( self);
-}
-#endif /* MODULE */
-
-/*
- * Function irobex_read (inode, file, buffer, count)
- *
- * User process wants to read some data
- *
- */
-static ssize_t irobex_read( struct file *file, char *buffer, size_t count,
- loff_t *noidea)
-{
- int len=0;
- struct irobex_cb *self;
- struct sk_buff *skb = NULL;
- int ret;
-
- self = irobex;
-
- ASSERT( self != NULL, return -EIO;);
- ASSERT( self->magic == IROBEX_MAGIC, return -EIO;);
-
- DEBUG( 4, __FUNCTION__ ": count=%d, skb_len=%d, state=%s, eof=%d\n",
- count, skb_queue_len( &self->rx_queue),
- irobex_state[self->state],
- self->eof);
-
- if ( self->state != OBEX_DATA) {
- DEBUG( 0, __FUNCTION__ "(), link not connected yet!\n");
- return -EIO;
- }
-
- /*
- * If there is data to return, then we return it. If not, then we
- * must check if we are still connected
- */
- if ( skb_queue_len( &self->rx_queue) == 0) {
-
- /* Still connected? */
- if ( self->state != OBEX_DATA) {
- switch ( self->eof) {
- case LM_USER_REQUEST:
- self->eof = FALSE;
- DEBUG(3, "read_irobex: returning 0\n");
- ret = 0;
- break;
- case LM_LAP_DISCONNECT:
- self->eof = FALSE;
- ret = -EIO;
- break;
- case LM_LAP_RESET:
- self->eof = FALSE;
- ret = -ECONNRESET;
- break;
- default:
- self->eof = FALSE;
- ret = -EIO;
- break;
- }
- return ret;
- }
-
- /* Return if user does not want to block */
- if ( file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- /* Go to sleep and wait for data! */
- interruptible_sleep_on( &self->read_wait);
-
- /*
- * Ensure proper reaction to signals, and screen out
- * blocked signals (page 112. linux device drivers)
- */
- if ( signal_pending( current))
- return -ERESTARTSYS;
- }
-
- while ( count && skb_queue_len( &self->rx_queue)) {
-
- skb = skb_dequeue( &self->rx_queue);
-
- /*
- * Check if we have previously stopped IrTTP and we know
- * have more free space in our rx_queue. If so tell IrTTP
- * to start delivering frames again before our rx_queue gets
- * empty
- */
- if ( self->rx_flow == FLOW_STOP) {
- if ( skb_queue_len( &self->rx_queue) < LOW_THRESHOLD) {
- DEBUG( 4, __FUNCTION__ "(), Starting IrTTP\n");
- self->rx_flow = FLOW_START;
- irttp_flow_request( self->tsap, FLOW_START);
- }
- }
-
- /*
- * Is the request from the user less that the amount in the
- * current packet?
- */
- if ( count < skb->len) {
- copy_to_user( buffer+len, skb->data, count);
- len += count;
-
- /*
- * Remove copied data from skb and queue
- * it for next read
- */
- skb_pull( skb, count);
- skb_queue_head( &self->rx_queue, skb);
-
- return len;
- } else {
- copy_to_user( buffer+len, skb->data, skb->len);
- count -= skb->len;
- len += skb->len;
-
- dev_kfree_skb( skb);
- }
- }
- return len;
-}
-
-/*
- * Function irobex_write (inode, file, buffer, count)
- *
- * User process wants to write to device
- *
- */
-static ssize_t irobex_write( struct file *file, const char *buffer,
- size_t count, loff_t *noidea)
-{
- struct irobex_cb *self;
- struct sk_buff *skb;
- int data_len = 0;
- int len = 0;
-
- self = irobex;
-
- ASSERT( self != NULL, return -EIO;);
- ASSERT( self->magic == IROBEX_MAGIC, return -EIO;);
-
- DEBUG( 4, __FUNCTION__ ": count = %d\n", count);
-
- /*
- * If we are not connected then we just give up!
- */
- if ( self->state != OBEX_DATA) {
- DEBUG( 0, __FUNCTION__ "(): Not connected!\n");
-
- return -ENOLINK;
- }
-
- /* Check if IrTTP is wants us to slow down */
- if ( self->tx_flow == FLOW_STOP) {
- DEBUG( 4, __FUNCTION__
- "(), IrTTP wants us to slow down, going to sleep\n");
- interruptible_sleep_on( &self->write_wait);
- }
-
- /* Send data to TTP layer possibly as muliple packets */
- while ( count) {
-
- /*
- * Check if request is larger than what fits inside a TTP
- * frame. In that case we must fragment the frame into
- * multiple TTP frames. IrOBEX should not care about message
- * boundaries.
- */
- if ( count < (self->irlap_data_size - IROBEX_MAX_HEADER))
- data_len = count;
- else
- data_len = self->irlap_data_size - IROBEX_MAX_HEADER;
-
- DEBUG( 4, __FUNCTION__ "(), data_len=%d, header_len = %d\n",
- data_len, IROBEX_MAX_HEADER);
-
- skb = dev_alloc_skb( data_len + IROBEX_MAX_HEADER);
- if ( skb == NULL) {
- DEBUG( 0, "irobex - couldn't allocate skbuff!\n");
- return 0;
- }
-
- skb_reserve( skb, IROBEX_MAX_HEADER);
- skb_put( skb, data_len);
-
- copy_from_user( skb->data, buffer+len, data_len);
- len += data_len;
- count -= data_len;
-
- DEBUG( 4, __FUNCTION__ "(), skb->len=%d\n", (int) skb->len);
- ASSERT( skb->len <= (self->irlap_data_size-IROBEX_MAX_HEADER),
- return len;);
-
- irttp_data_request( self->tsap, skb);
- }
- return (len);
-}
-
-/*
- * Function irobex_poll (file, wait)
- *
- *
- *
- */
-static u_int irobex_poll(struct file *file, poll_table *wait)
-{
- DEBUG( 0, __FUNCTION__ "(), Sorry not implemented yet!\n");
-
- /* check out /usr/src/pcmcia/modules/ds.c for an example */
- return 0;
-}
-
-/*
- * Function irobex_fasync (inode, filp, mode)
- *
- * Implementation for SIGIO
- *
- */
-static int irobex_fasync( int fd, struct file *filp, int on)
-{
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = irobex;
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IROBEX_MAGIC, return -1;);
-
- return fasync_helper( fd, filp, on, &self->async);
-}
-
-/*
- * Function irobex_seek (inode, file, buffer, count)
- *
- * Not implemented yet!
- *
- */
-static loff_t irobex_seek( struct file *file, loff_t off, int whence)
-{
- DEBUG( 0, __FUNCTION__ "(), Not implemented yet!\n");
-
- return -ESPIPE;
-}
-
-/*
- * Function irobex_ioctl (inode, filp, cmd, arg)
- *
- * Drivers IOCTL handler, used for connecting and disconnecting
- * irobex connections
- *
- */
-static int irobex_ioctl( struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct irobex_cb *self;
- int err = 0;
- int size = _IOC_SIZE(cmd);
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = irobex;
-
- ASSERT( self != NULL, return -ENOTTY;);
- ASSERT( self->magic = IROBEX_MAGIC, return -ENOTTY;);
-
- if ( _IOC_TYPE(cmd) != IROBEX_IOC_MAGIC)
- return -EINVAL;
- if ( _IOC_NR(cmd) > IROBEX_IOC_MAXNR)
- return -EINVAL;
-
- if ( _IOC_DIR(cmd) & _IOC_READ)
- err = verify_area( VERIFY_WRITE, (void *) arg, size);
- else if ( _IOC_DIR(cmd) & _IOC_WRITE)
- err = verify_area( VERIFY_READ, (void *) arg, size);
- if ( err)
- return err;
-
- switch ( cmd) {
- case IROBEX_IOCSCONNECT:
- DEBUG( 4, __FUNCTION__ "(): IROBEX_IOCSCONNECT!\n");
-
- /* Already connected? */
- if ( self->state == OBEX_DATA) {
- DEBUG( 0, __FUNCTION__ "(), already connected!\n");
- return 0;
- }
-
- /* Timeout after 15 secs. */
- irobex_start_watchdog_timer( self, 1000);
-
- /*
- * If we have discovered a remote device we
- * check if the discovery is still fresh. If not, we don't
- * trust the address.
- */
- if ( self->daddr && ((jiffies - self->time_discovered) > 500))
- self->daddr = 0;
-
- /*
- * Try to discover remote remote device if it has not been
- * discovered yet.
- */
- if ( !self->daddr) {
- self->state = OBEX_DISCOVER;
-
- irlmp_discovery_request( 8);
-
- /* Wait for discovery to complete */
- interruptible_sleep_on( &self->write_wait);
- del_timer( &self->watchdog_timer);
- }
-
- /* Give up if we are unable to discover any remote devices */
- if ( !self->daddr) {
- DEBUG( 0, __FUNCTION__
- "(), Unable to discover any devices!\n");
- return -ENOTTY;
- }
-
- /* Need to find remote destination TSAP selector? */
- if ( !self->dtsap_sel) {
- DEBUG( 0, __FUNCTION__ "() : Quering remote IAS!\n");
-
- self->state = OBEX_QUERY;
-
- /* Timeout after 5 secs. */
- irobex_start_watchdog_timer( self, 500);
- iriap_getvaluebyclass_request(
- self->daddr,
- "OBEX",
- "IrDA:TinyTP:LsapSel",
- irobex_get_value_confirm,
- self);
-
- interruptible_sleep_on( &self->write_wait);
- del_timer( &self->watchdog_timer);
- }
-
- if ( !self->dtsap_sel) {
- DEBUG( 0, __FUNCTION__
- "(), Unable to query remote LM-IAS!\n");
- return -ENOTTY;
- }
-
- self->state = OBEX_CONN;
-
- /* Timeout after 5 secs. */
- irobex_start_watchdog_timer( self, 500);
-
- irttp_connect_request( self->tsap, self->dtsap_sel,
- self->daddr, NULL, SAR_DISABLE,
- NULL);
-
- /* Go to sleep and wait for connection! */
- interruptible_sleep_on( &self->write_wait);
- del_timer( &self->watchdog_timer);
-
- if ( self->state != OBEX_DATA) {
- DEBUG( 0, __FUNCTION__
- "(), Unable to connect to remote device!\n");
- return -ENOTTY;
- }
-
- break;
- case IROBEX_IOCSDISCONNECT:
- DEBUG( 4, __FUNCTION__ "(): IROBEX_IOCSDISCONNECT!\n");
-
- if ( self->state != OBEX_DATA)
- return 0;
-
- irttp_disconnect_request( self->tsap, NULL, P_NORMAL);
-
- /* Reset values for this instance */
- self->state = OBEX_IDLE;
- self->eof = LM_USER_REQUEST;
- self->daddr = 0;
- self->dtsap_sel = 0;
- self->rx_flow = FLOW_START;
- self->tx_flow = FLOW_START;
-
- wake_up_interruptible( &self->read_wait);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * Function irobex_dev_open (inode, file)
- *
- * Device opened by user process
- *
- */
-static int irobex_dev_open( struct inode * inode, struct file *file)
-{
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = irobex;
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IROBEX_MAGIC, return -1;);
-
- if ( self->count++) {
- DEBUG( 3, "open_irobex: count not zero; actual = %d\n",
- self->count);
- self->count--;
- return -EBUSY;
- }
-
- irobex_register_server( self);
-
- /* Reset values for this instance */
- self->state = OBEX_IDLE;
- self->eof = FALSE;
- self->daddr = 0;
- self->dtsap_sel = 0;
- self->rx_flow = FLOW_START;
- self->tx_flow = FLOW_START;
-
- MOD_INC_USE_COUNT;
-
- return 0;
-}
-
-static int irobex_dev_close( struct inode *inode, struct file *file)
-{
- struct irobex_cb *self;
- struct sk_buff *skb;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = irobex;
-
- ASSERT( self != NULL, return -ENODEV;);
- ASSERT( self->magic == IROBEX_MAGIC, return -EBADR;);
-
- /* Deallocate buffers */
- while (( skb = skb_dequeue( &self->rx_queue)) != NULL) {
- DEBUG( 3, "irobex_close: freeing SKB\n");
- dev_kfree_skb( skb);
- }
-
- /* Close TSAP is its still there */
- if ( self->tsap) {
- irttp_close_tsap( self->tsap);
- self->tsap = NULL;
- }
- self->state = OBEX_IDLE;
- self->eof = FALSE;
- self->daddr = 0;
- self->dtsap_sel = 0;
- self->rx_flow = FLOW_START;
- self->tx_flow = FLOW_START;
-
- /* Remove this filp from the asynchronously notified filp's */
- irobex_fasync( -1, file, 0);
-
- self->count--;
-
- MOD_DEC_USE_COUNT;
-
- return 0;
-}
-
-/*
- * Function irobex_discovery_inication (daddr)
- *
- * Remote device discovered, try query the remote IAS to see which
- * device it is, and which services it has.
- *
- */
-void irobex_discovery_indication( DISCOVERY *discovery)
-{
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = irobex;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- /* Remember address and time if was discovered */
- self->daddr = discovery->daddr;
- self->time_discovered = jiffies;
-
- /* Wake up process if its waiting for device to be discovered */
- if ( self->state == OBEX_DISCOVER)
- wake_up_interruptible( &self->write_wait);
-}
-
-/*
- * Function irobex_disconnect_indication (handle, reason, priv)
- *
- * Link has been disconnected
- *
- */
-void irobex_disconnect_indication( void *instance, void *sap,
- LM_REASON reason, struct sk_buff *userdata)
-{
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "(), reason=%d\n", reason);
-
- self = ( struct irobex_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- self->state = OBEX_IDLE;
- self->eof = reason;
- self->daddr = 0;
- self->dtsap_sel = 0;
- self->rx_flow = self->tx_flow = FLOW_START;
-
- wake_up_interruptible( &self->read_wait);
- wake_up_interruptible( &self->write_wait);
-
- DEBUG( 4, __FUNCTION__ "(), skb_queue_len=%d\n",
- skb_queue_len( &irobex->rx_queue));
-
- if ( userdata)
- dev_kfree_skb( userdata);
-}
-
-/*
- * Function irobex_connect_confirm (instance, sap, qos, userdata)
- *
- * Connection to peer IrOBEX layer established
- *
- */
-void irobex_connect_confirm( void *instance, void *sap, struct qos_info *qos,
- int max_sdu_size, struct sk_buff *userdata)
-{
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct irobex_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
- ASSERT( qos != NULL, return;);
-
- DEBUG( 4, __FUNCTION__ "(), IrLAP data size=%d\n",
- qos->data_size.value);
-
- self->irlap_data_size = qos->data_size.value;
-
- /*
- * Wake up any blocked process wanting to write. Finally this process
- * can start writing since the connection is now open :-)
- */
- if (self->state == OBEX_CONN) {
- self->state = OBEX_DATA;
- wake_up_interruptible( &self->write_wait);
- }
-
- if ( userdata) {
- dev_kfree_skb( userdata);
-
- }
-}
-
-/*
- * Function irobex_connect_response (handle)
- *
- * Accept incomming connection
- *
- */
-void irobex_connect_response( struct irobex_cb *self)
-{
- struct sk_buff *skb;
-/* __u8 *frame; */
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- self->state = OBEX_DATA;
-
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__ "() Could not allocate sk_buff!\n");
- return;
- }
-
- /* Reserve space for MUX_CONTROL and LAP header */
- skb_reserve( skb, TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER);
-
- irttp_connect_response( self->tsap, SAR_DISABLE, skb);
-}
-
-/*
- * Function irobex_connect_indication (handle, skb, priv)
- *
- * Connection request from a remote device
- *
- */
-void irobex_connect_indication( void *instance, void *sap,
- struct qos_info *qos, int max_sdu_size,
- struct sk_buff *userdata)
-{
- struct irmanager_event mgr_event;
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct irobex_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
- ASSERT( userdata != NULL, return;);
-
- self->eof = FALSE;
-
- DEBUG( 4, __FUNCTION__ "(), skb_len = %d\n",
- (int) userdata->len);
-
- DEBUG( 4, __FUNCTION__ "(), IrLAP data size=%d\n",
- qos->data_size.value);
-
- ASSERT( qos->data_size.value >= 64, return;);
-
- self->irlap_data_size = qos->data_size.value;
-
- /* We just accept the connection */
- irobex_connect_response( self);
-#if 1
- mgr_event.event = EVENT_IROBEX_START;
- sprintf( mgr_event.devname, "%s", self->devname);
- irmanager_notify( &mgr_event);
-#endif
- wake_up_interruptible( &self->read_wait);
-
- if ( userdata) {
- dev_kfree_skb( userdata);
- }
-}
-
-/*
- * Function irobex_data_indication (instance, sap, skb)
- *
- * This function gets the data that is received on the data channel
- *
- */
-void irobex_data_indication( void *instance, void *sap, struct sk_buff *skb)
-{
-
- struct irobex_cb *self;
-
- self = ( struct irobex_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- DEBUG( 4, __FUNCTION__ "(), len=%d\n", (int) skb->len);
-
- skb_queue_tail( &self->rx_queue, skb);
-
- /*
- * Check if queues are beginning to get filled, and inform
- * IrTTP to slow down if that is the case
- */
- if ( skb_queue_len( &self->rx_queue) > HIGH_THRESHOLD) {
- DEBUG( 0, __FUNCTION__
- "(), rx_queue is full, telling IrTTP to slow down\n");
- self->rx_flow = FLOW_STOP;
- irttp_flow_request( self->tsap, FLOW_STOP);
- }
-
- /*
- * Wake up process blocked on read or select
- */
- wake_up_interruptible( &self->read_wait);
-
- /* Send signal to asynchronous readers */
- if ( self->async)
- kill_fasync( self->async, SIGIO);
-}
-
-/*
- * Function irobex_flow_indication (instance, sap, cmd)
- *
- *
- *
- */
-void irobex_flow_indication( void *instance, void *sap, LOCAL_FLOW flow)
-{
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- self = ( struct irobex_cb *) instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- switch ( flow) {
- case FLOW_STOP:
- DEBUG( 0, __FUNCTION__ "(), IrTTP wants us to slow down\n");
- self->tx_flow = flow;
- break;
- case FLOW_START:
- self->tx_flow = flow;
- DEBUG( 0, __FUNCTION__ "(), IrTTP wants us to start again\n");
- wake_up_interruptible( &self->write_wait);
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), Unknown flow command!\n");
- }
-}
-
-/*
- * Function irobex_get_value_confirm (obj_id, value)
- *
- * Got results from previous GetValueByClass request
- *
- */
-void irobex_get_value_confirm( __u16 obj_id, struct ias_value *value,
- void *priv)
-{
- struct irobex_cb *self;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( priv != NULL, return;);
- self = ( struct irobex_cb *) priv;
-
- if ( !self || self->magic != IROBEX_MAGIC) {
- DEBUG( 0, "irobex_get_value_confirm: bad magic!\n");
- return;
- }
-
- switch ( value->type) {
- case IAS_INTEGER:
- DEBUG( 4, __FUNCTION__ "() int=%d\n", value->t.integer);
-
- if ( value->t.integer != -1) {
- self->dtsap_sel = value->t.integer;
-
- /*
- * Got the remote TSAP, so wake up any processes
- * blocking on write. We don't do the connect
- * ourselves since we must make sure there is a
- * process that wants to make a connection, so we
- * just let that process do the connect itself
- */
- if ( self->state == OBEX_QUERY)
- wake_up_interruptible( &self->write_wait);
- } else
- self->dtsap_sel = 0;
- break;
- case IAS_STRING:
- DEBUG( 0, __FUNCTION__ "(), got string %s\n", value->t.string);
- break;
- case IAS_OCT_SEQ:
- DEBUG( 0, __FUNCTION__ "(), OCT_SEQ not implemented\n");
- break;
- case IAS_MISSING:
- DEBUG( 0, __FUNCTION__ "(), MISSING not implemented\n");
- break;
- default:
- DEBUG( 0, __FUNCTION__ "(), unknown type!\n");
- break;
- }
-}
-
-/*
- * Function irobex_provider_confirm (dlsap)
- *
- * IrOBEX provider is discovered. We can now establish connections
- * TODO: This function is currently not used!
- */
-void irobex_provider_confirm( struct irobex_cb *self, __u8 dlsap)
-{
- /* struct irobex_cb *self = irobex; */
- struct notify_t notify;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- notify.data_indication = irobex_data_indication;
- notify.connect_confirm = irobex_connect_confirm;
- notify.connect_indication = irobex_connect_indication;
- notify.flow_indication = irobex_flow_indication;
- notify.disconnect_indication = irobex_disconnect_indication;
- notify.instance = self;
-
- /* Create TSAP's */
- self->tsap = irttp_open_tsap( LSAP_ANY, DEFAULT_INITIAL_CREDIT,
- &notify);
-
-/* DEBUG( 0, "OBEX allocated TSAP%d for data\n", self->handle); */
-
- /* irlan_do_event( IAS_PROVIDER_AVAIL, NULL, &frame); */
-}
-
-/*
- * Function irobex_register_server(void)
- *
- * Register server support so we can accept incomming connections. We
- * must register both a TSAP for control and data
- *
- */
-void irobex_register_server( struct irobex_cb *self)
-{
- struct notify_t notify;
- struct ias_object *obj;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- irda_notify_init( &notify);
-
- notify.connect_confirm = irobex_connect_confirm;
- notify.connect_indication = irobex_connect_indication;
- notify.disconnect_indication = irobex_disconnect_indication;
- notify.data_indication = irobex_data_indication;
- notify.flow_indication = irobex_flow_indication;
- notify.instance = self;
- strcpy( notify.name, "IrOBEX");
-
- self->tsap = irttp_open_tsap( TSAP_IROBEX, DEFAULT_INITIAL_CREDIT,
- &notify);
- if ( self->tsap == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Unable to allocate TSAP!\n");
- return;
- }
-
- /*
- * Register with LM-IAS
- */
- obj = irias_new_object( "OBEX", 0x42343);
- irias_add_integer_attrib( obj, "IrDA:TinyTP:LsapSel", TSAP_IROBEX);
- irias_insert_object( obj);
-}
-
-void irobex_watchdog_timer_expired( unsigned long data)
-{
- struct irobex_cb *self = ( struct irobex_cb *) data;
-
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == IROBEX_MAGIC, return;);
-
- switch (self->state) {
- case OBEX_CONN: /* FALLTROUGH */
- case OBEX_DISCOVER: /* FALLTROUGH */
- case OBEX_QUERY: /* FALLTROUGH */
- wake_up_interruptible( &self->write_wait);
- break;
- default:
- break;
- }
-}
-
-#ifdef CONFIG_PROC_FS
-/*
- * Function irobex_proc_read (buf, start, offset, len, unused)
- *
- * Give some info to the /proc file system
- */
-static int irobex_proc_read( char *buf, char **start, off_t offset,
- int len, int unused)
-{
- struct irobex_cb *self;
-
- self = irobex;
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == IROBEX_MAGIC, return -1;);
-
- len = 0;
-
- len += sprintf( buf+len, "ifname: %s ",self->devname);
- len += sprintf( buf+len, "state: %s ", irobex_state[ self->state]);
- len += sprintf( buf+len, "EOF: %s\n", self->eof ? "TRUE": "FALSE");
-
- return len;
-}
-
-#endif /* CONFIG_PROC_FS */
-
-#ifdef MODULE
-
-MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
-MODULE_DESCRIPTION("The Linux IrOBEX module");
-
-/*
- * Function init_module (void)
- *
- * Initialize the IrOBEX module, this function is called by the
- * modprobe(1) program.
- */
-int init_module(void)
-{
- irobex_init();
-
- return 0;
-}
-
-/*
- * Function cleanup_module (void)
- *
- * Remove the IrOBEX module, this function is called by the rmmod(1)
- * program
- */
-void cleanup_module(void)
-{
- /*
- * No need to check MOD_IN_USE, as sys_delete_module() checks.
- */
-
- /* Free some memory */
- irobex_cleanup();
-}
-
-#endif /* MODULE */
-
-
-
-
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index bc6a3ace5..f3b710b95 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irproc.c
- * Version:
+ * Version: 1.0
* Description: Various entries in the /proc file system
* Status: Experimental.
* Author: Thomas Davis, <ratbert@radiks.net>
* Created at: Sat Feb 21 21:33:24 1998
- * Modified at: Tue Dec 15 09:21:50 1998
+ * Modified at: Tue Apr 6 19:07:06 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998, Thomas Davis, <ratbert@radiks.net>,
@@ -27,31 +27,23 @@
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
+#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>
-static int proc_irda_21x_lookup(struct inode * dir, struct dentry *dentry);
-
-static int proc_irda_readdir(struct file *filp, void *dirent,
- filldir_t filldir);
-
-extern int irda_device_proc_read( char *buf, char **start, off_t offset,
- int len, int unused);
-extern int irlap_proc_read( char *buf, char **start, off_t offset, int len,
- int unused);
-extern int irlmp_proc_read( char *buf, char **start, off_t offset, int len,
- int unused);
-extern int irttp_proc_read( char *buf, char **start, off_t offset, int len,
- int unused);
-extern int irias_proc_read( char *buf, char **start, off_t offset, int len,
- int unused);
-
-static int proc_discovery_read( char *buf, char **start, off_t offset, int len,
- int unused);
-
-/* int proc_irda_readdir(struct inode *inode, struct file *filp, void *dirent, */
-/* filldir_t filldir); */
+extern int irda_device_proc_read(char *buf, char **start, off_t offset,
+ int len, int unused);
+extern int irlap_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused);
+extern int irlmp_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused);
+extern int irttp_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused);
+extern int irias_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused);
+extern int discovery_proc_read(char *buf, char **start, off_t offset, int len,
+ int unused);
enum irda_directory_inos {
PROC_IRDA_LAP = 1,
@@ -63,121 +55,26 @@ enum irda_directory_inos {
PROC_IRDA_IRIAS
};
-static struct file_operations proc_irda_dir_operations = {
- NULL, /* lseek - default */
- NULL, /* read - bad */
- NULL, /* write - bad */
- proc_irda_readdir, /* readdir */
- NULL, /* select - default */
- NULL, /* ioctl - default */
- NULL, /* mmap */
- NULL, /* no special open code */
- NULL, /* no special release code */
- NULL /* can't fsync */
-};
-
-/*
- * proc directories can do almost nothing..
- */
-struct inode_operations proc_irda_dir_inode_operations = {
- &proc_irda_dir_operations, /* default net directory file-ops */
- NULL, /* create */
- proc_irda_21x_lookup,
- NULL, /* link */
- NULL, /* unlink */
- NULL, /* symlink */
- NULL, /* mkdir */
- NULL, /* rmdir */
- NULL, /* mknod */
- NULL, /* rename */
- NULL, /* readlink */
- NULL, /* follow_link */
- NULL, /* readpage */
- NULL, /* writepage */
- NULL, /* bmap */
- NULL, /* truncate */
- NULL /* permission */
+struct irda_entry {
+ char *name;
+ int (*fn)(char*,char**,off_t,int,int);
};
-struct proc_dir_entry proc_irda = {
- 0, 4, "irda",
- S_IFDIR | S_IRUGO | S_IXUGO, 2, 0, 0,
- 0, &proc_irda_dir_inode_operations,
- NULL, NULL,
- NULL,
- NULL, NULL
-};
+struct proc_dir_entry *proc_irda;
+static struct irda_entry dir[] = {
#if 0
-struct proc_dir_entry proc_lpt = {
- 0, 3, "lpt",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &irlpt_proc_read /* get_info */,
-};
+ {"lpt", irlpt_proc_read},
#endif
-
-struct proc_dir_entry proc_discovery = {
- 0, 9, "discovery",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &proc_discovery_read /* get_info */,
-};
-
-struct proc_dir_entry proc_irda_device = {
- 0, 11, "irda_device",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL,
- &irda_device_proc_read,
-};
-
-struct proc_dir_entry proc_ttp = {
- 0, 5, "irttp",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &irttp_proc_read /* get_info */,
-};
-
-struct proc_dir_entry proc_lmp = {
- 0, 5, "irlmp",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &irlmp_proc_read /* get_info */,
-};
-
-struct proc_dir_entry proc_lap = {
- 0, 5, "irlap",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &irlap_proc_read /* get_info */,
-};
-
-struct proc_dir_entry proc_ias = {
- 0, 5, "irias",
- S_IFREG | S_IRUGO, 1, 0, 0,
- 0, NULL /* ops -- default to array */,
- &irias_proc_read /* get_info */,
+ {"discovery", discovery_proc_read},
+ {"irda_device", irda_device_proc_read},
+ {"irttp", irttp_proc_read},
+ {"irlmp", irlmp_proc_read},
+ {"irlap", irlap_proc_read},
+ {"irias", irias_proc_read},
};
-/*
- * Function proc_delete_dentry (dentry)
- *
- * Copy of proc/root.c because this function is invisible to the irda
- * module
- *
- */
-static void proc_delete_dentry(struct dentry * dentry)
-{
- d_drop(dentry);
-}
-
-static struct dentry_operations proc_dentry_operations =
-{
- NULL, /* revalidate */
- NULL, /* d_hash */
- NULL, /* d_compare */
- proc_delete_dentry /* d_delete(struct dentry *) */
-};
+#define IRDA_ENTRIES_NUM (sizeof(dir)/sizeof(dir[0]))
/*
* Function irda_proc_register (void)
@@ -186,13 +83,13 @@ static struct dentry_operations proc_dentry_operations =
*
*/
void irda_proc_register(void) {
- proc_net_register( &proc_irda);
- proc_register( &proc_irda, &proc_lap);
- proc_register( &proc_irda, &proc_lmp);
- proc_register( &proc_irda, &proc_ttp);
- proc_register( &proc_irda, &proc_ias);
- proc_register( &proc_irda, &proc_irda_device);
- proc_register( &proc_irda, &proc_discovery);
+ int i;
+ proc_irda = create_proc_entry("net/irda", S_IFDIR, NULL);
+#ifdef MODULE
+ proc_irda->fill_inode = &irda_proc_modcount;
+#endif /* MODULE */
+ for (i=0;i<IRDA_ENTRIES_NUM;i++)
+ create_proc_entry(dir[i].name,0,proc_irda)->get_info=dir[i].fn;
}
/*
@@ -202,184 +99,8 @@ void irda_proc_register(void) {
*
*/
void irda_proc_unregister(void) {
- proc_unregister( &proc_irda, proc_discovery.low_ino);
- proc_unregister(&proc_irda, proc_irda_device.low_ino);
- proc_unregister( &proc_irda, proc_ias.low_ino);
- proc_unregister( &proc_irda, proc_ttp.low_ino);
- proc_unregister( &proc_irda, proc_lmp.low_ino);
- proc_unregister( &proc_irda, proc_lap.low_ino);
- proc_unregister( proc_net, proc_irda.low_ino);
-}
-
-/*
- * Function proc_irda_21x_lookup (dir, dentry)
- *
- * This is a copy of proc_lookup from the linux-2.1.x
- *
- */
-int proc_irda_21x_lookup(struct inode * dir, struct dentry *dentry)
-{
- struct inode *inode;
- struct proc_dir_entry * de;
- int error;
-
- error = -ENOTDIR;
- if (!dir || !S_ISDIR(dir->i_mode))
- goto out;
-
- error = -ENOENT;
- inode = NULL;
- de = (struct proc_dir_entry *) dir->u.generic_ip;
- if (de) {
- for (de = de->subdir; de ; de = de->next) {
- if (!de || !de->low_ino)
- continue;
- if (de->namelen != dentry->d_name.len)
- continue;
- if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
- int ino = de->low_ino | (dir->i_ino & ~(0xffff));
- error = -EINVAL;
- inode = proc_get_inode(dir->i_sb, ino, de);
- break;
- }
- }
- }
-
- if (inode) {
- dentry->d_op = &proc_dentry_operations;
- d_add(dentry, inode);
- error = 0;
- }
-out:
- return error;
-}
-
-/*
- * Function proc_irda_readdir (filp, dirent, filldir)
- *
- * This is a copy from linux/fs/proc because the function is invisible
- * to the irda module
- *
- */
-static int proc_irda_readdir( struct file *filp, void *dirent,
- filldir_t filldir)
-{
- struct proc_dir_entry * de;
- unsigned int ino;
int i;
-
- struct inode *inode = filp->f_dentry->d_inode;
- if (!inode || !S_ISDIR(inode->i_mode))
- return -ENOTDIR;
- ino = inode->i_ino;
- de = (struct proc_dir_entry *) inode->u.generic_ip;
- if (!de)
- return -EINVAL;
- i = filp->f_pos;
- switch (i) {
- case 0:
- if (filldir(dirent, ".", 1, i, ino) < 0)
- return 0;
- i++;
- filp->f_pos++;
- /* fall through */
- case 1:
- if (filldir(dirent, "..", 2, i, de->parent->low_ino) < 0)
- return 0;
- i++;
- filp->f_pos++;
- /* fall through */
- default:
- ino &= ~0xffff;
- de = de->subdir;
- i -= 2;
- for (;;) {
- if (!de)
- return 1;
- if (!i)
- break;
- de = de->next;
- i--;
- }
-
- do {
- if (filldir(dirent, de->name, de->namelen, filp->f_pos,
- ino | de->low_ino) < 0)
- return 0;
- filp->f_pos++;
- de = de->next;
- } while (de);
- }
- return 1;
-}
-
-/*
- * Function proc_discovery_read (buf, start, offset, len, unused)
- *
- * Print discovery information in /proc file system
- *
- */
-int proc_discovery_read( char *buf, char **start, off_t offset, int len,
- int unused)
-{
- DISCOVERY *discovery;
- struct lap_cb *lap;
- unsigned long flags;
-
- if ( !irlmp)
- return len;
-
- len = sprintf(buf, "IrLMP: Discovery log:\n\n");
-
- save_flags(flags);
- cli();
-
- lap = ( struct lap_cb *) hashbin_get_first( irlmp->links);
- while( lap != NULL) {
- ASSERT( lap->magic == LMP_LAP_MAGIC, return 0;);
-
- len += sprintf( buf+len, "Link saddr=0x%08x\n", lap->saddr);
- discovery = ( DISCOVERY *) hashbin_get_first( lap->cachelog);
- while ( discovery != NULL) {
- len += sprintf( buf+len, " name: %s,",
- discovery->info);
-
- len += sprintf( buf+len, " hint: ");
- if ( discovery->hint[0] & HINT_PNP)
- len += sprintf( buf+len, "PnP Compatible ");
- if ( discovery->hint[0] & HINT_PDA)
- len += sprintf( buf+len, "PDA/Palmtop ");
- if ( discovery->hint[0] & HINT_COMPUTER)
- len += sprintf( buf+len, "Computer ");
- if ( discovery->hint[0] & HINT_PRINTER)
- len += sprintf( buf+len, "Printer ");
- if ( discovery->hint[0] & HINT_MODEM)
- len += sprintf( buf+len, "Modem ");
- if ( discovery->hint[0] & HINT_FAX)
- len += sprintf( buf+len, "Fax ");
- if ( discovery->hint[0] & HINT_LAN)
- len += sprintf( buf+len, "LAN Access");
-
- if ( discovery->hint[1] & HINT_TELEPHONY)
- len += sprintf( buf+len, "Telephony ");
- if ( discovery->hint[1] & HINT_FILE_SERVER)
- len += sprintf( buf+len, "File Server ");
- if ( discovery->hint[1] & HINT_COMM)
- len += sprintf( buf+len, "IrCOMM ");
- if ( discovery->hint[1] & HINT_OBEX)
- len += sprintf( buf+len, "IrOBEX ");
-
- len += sprintf( buf+len, ", daddr: 0x%08x\n",
- discovery->daddr);
-
- len += sprintf( buf+len, "\n");
-
- discovery = ( DISCOVERY *) hashbin_get_next( lap->cachelog);
- }
- lap = ( struct lap_cb *) hashbin_get_next( irlmp->links);
- }
- restore_flags(flags);
-
- return len;
+ for (i=0;i<IRDA_ENTRIES_NUM;i++)
+ remove_proc_entry(dir[i].name, proc_irda);
+ remove_proc_entry("net/irda", NULL);
}
-
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 3d23c8aef..90029dfd8 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Jun 9 13:29:31 1998
- * Modified at: Wed Jan 13 21:21:22 1999
+ * Modified at: Thu Mar 11 13:27:04 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (C) 1998, Aage Kvalnes <aage@cs.uit.no>
@@ -45,9 +45,10 @@ static __u32 hash( char* name);
* Create hashbin!
*
*/
-hashbin_t *hashbin_new( int type)
+hashbin_t *hashbin_new(int type)
{
hashbin_t* hashbin;
+ int i;
DEBUG( 4, __FUNCTION__ "()\n");
@@ -55,13 +56,19 @@ hashbin_t *hashbin_new( int type)
* Allocate new hashbin
*/
hashbin = kmalloc( sizeof(hashbin_t), GFP_ATOMIC);
+ if (!hashbin)
+ return NULL;
/*
* Initialize structure
*/
- memset( hashbin, 0, sizeof(hashbin_t));
+ memset(hashbin, 0, sizeof(hashbin_t));
hashbin->hb_type = type;
hashbin->magic = HB_MAGIC;
+
+ /* Make sure all spinlock's are unlocked */
+ for (i=0;i<HASHBIN_SIZE;i++)
+ hashbin->hb_mutex[i] = SPIN_LOCK_UNLOCKED;
return hashbin;
}
@@ -159,10 +166,9 @@ void hashbin_lock( hashbin_t* hashbin, __u32 hashv, char* name,
bin = GET_HASHBIN( hashv);
/* Synchronize */
- if ( hashbin->hb_type & HB_GLOBAL ) {
-
+ if ( hashbin->hb_type & HB_GLOBAL )
spin_lock_irqsave( &hashbin->hb_mutex[ bin], flags);
- } else {
+ else {
save_flags( flags);
cli();
}
@@ -174,27 +180,27 @@ void hashbin_lock( hashbin_t* hashbin, __u32 hashv, char* name,
* Unlock the hashbin
*
*/
-void hashbin_unlock( hashbin_t* hashbin, __u32 hashv, char* name,
- unsigned long flags)
+void hashbin_unlock(hashbin_t* hashbin, __u32 hashv, char* name,
+ unsigned long flags)
{
int bin;
- DEBUG( 0, "hashbin_unlock()\n");
+ DEBUG(0, "hashbin_unlock()\n");
- ASSERT( hashbin != NULL, return;);
- ASSERT( hashbin->magic == HB_MAGIC, return;);
+ ASSERT(hashbin != NULL, return;);
+ ASSERT(hashbin->magic == HB_MAGIC, return;);
/*
* Locate hashbin
*/
- if ( name )
- hashv = hash( name );
- bin = GET_HASHBIN( hashv );
+ if (name )
+ hashv = hash(name);
+ bin = GET_HASHBIN(hashv);
/* Release lock */
- if ( hashbin->hb_type & HB_GLOBAL) {
+ if ( hashbin->hb_type & HB_GLOBAL)
spin_unlock_irq( &hashbin->hb_mutex[ bin]);
- } else if ( hashbin->hb_type & HB_LOCAL) {
+ else if (hashbin->hb_type & HB_LOCAL) {
restore_flags( flags);
}
}
@@ -205,13 +211,12 @@ void hashbin_unlock( hashbin_t* hashbin, __u32 hashv, char* name,
* Insert an entry into the hashbin
*
*/
-void hashbin_insert( hashbin_t* hashbin, QUEUE* entry, __u32 hashv,
- char* name)
+void hashbin_insert( hashbin_t* hashbin, QUEUE* entry, __u32 hashv, char* name)
{
unsigned long flags = 0;
int bin;
- DEBUG( 4, "hashbin_insert()\n");
+ DEBUG( 4, __FUNCTION__"()\n");
ASSERT( hashbin != NULL, return;);
ASSERT( hashbin->magic == HB_MAGIC, return;);
@@ -583,7 +588,7 @@ inline void enqueue_last( QUEUE **queue, QUEUE* element)
void enqueue_first(QUEUE **queue, QUEUE* element)
{
- DEBUG( 4, "enqueue_first()\n");
+ DEBUG( 4, __FUNCTION__ "()\n");
/*
* Check if queue is empty.
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index e8a3f9910..0b9a4f189 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun May 24 22:12:06 1998
- * Modified at: Thu Jan 7 10:35:02 1999
+ * Modified at: Fri Apr 23 09:46:38 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997 Dag Brattli, All Rights Reserved.
@@ -31,9 +31,12 @@
#include <net/irda/irda.h>
#define NET_IRDA 412 /* Random number */
-enum { DISCOVERY=1, DEVNAME, COMPRESSION, DEBUG };
+enum { DISCOVERY=1, DEVNAME, COMPRESSION, DEBUG, SLOTS, SLOT_TIMEOUT };
extern int sysctl_discovery;
+extern int sysctl_discovery_slots;
+extern int sysctl_slot_timeout;
+extern int sysctl_fast_poll_increase;
int sysctl_compression = 0;
extern char sysctl_devname[];
@@ -50,9 +53,17 @@ static ctl_table irda_table[] = {
{ COMPRESSION, "compression", &sysctl_compression,
sizeof(int), 0644, NULL, &proc_dointvec },
#ifdef CONFIG_IRDA_DEBUG
- { DEBUG, "debug", &irda_debug,
+ { DEBUG, "debug", &irda_debug,
sizeof(int), 0644, NULL, &proc_dointvec },
#endif
+#ifdef CONFIG_IRDA_FAST_RR
+ { SLOTS, "fast_poll_increase", &sysctl_fast_poll_increase,
+ sizeof(int), 0644, NULL, &proc_dointvec },
+#endif
+ { SLOTS, "discovery_slots", &sysctl_discovery_slots,
+ sizeof(int), 0644, NULL, &proc_dointvec },
+ { SLOT_TIMEOUT, "slot_timeout", &sysctl_slot_timeout,
+ sizeof(int), 0644, NULL, &proc_dointvec },
{ 0 }
};
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 3da804e7d..bf0624eee 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: irttp.c
- * Version: 0.4
+ * Version: 1.2
* Description: Tiny Transport Protocol (TTP) implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:31 1997
- * Modified at: Tue Jan 19 23:56:58 1999
+ * Modified at: Sat Apr 10 10:32:21 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
@@ -28,33 +28,35 @@
#include <linux/init.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <net/irda/irda.h>
+#include <net/irda/irmod.h>
#include <net/irda/irlmp.h>
#include <net/irda/irttp.h>
struct irttp_cb *irttp = NULL;
-static void __irttp_close_tsap( struct tsap_cb *self);
+static void __irttp_close_tsap(struct tsap_cb *self);
-static void irttp_data_indication( void *instance, void *sap,
- struct sk_buff *skb);
-static void irttp_udata_indication( void *instance, void *sap,
- struct sk_buff *skb);
-static void irttp_disconnect_indication( void *instance, void *sap,
- LM_REASON reason,
- struct sk_buff *);
-static void irttp_connect_indication( void *instance, void *sap,
- struct qos_info *qos, int max_sdu_size,
- struct sk_buff *skb);
+static int irttp_data_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static int irttp_udata_indication(void *instance, void *sap,
+ struct sk_buff *skb);
+static void irttp_disconnect_indication(void *instance, void *sap,
+ LM_REASON reason,
+ struct sk_buff *);
+static void irttp_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *skb);
-static void irttp_run_tx_queue( struct tsap_cb *self);
-static void irttp_run_rx_queue( struct tsap_cb *self);
+static void irttp_run_tx_queue(struct tsap_cb *self);
+static void irttp_run_rx_queue(struct tsap_cb *self);
-static void irttp_flush_queues( struct tsap_cb *self);
-static void irttp_fragment_skb( struct tsap_cb *self, struct sk_buff *skb);
-static struct sk_buff *irttp_reassemble_skb( struct tsap_cb *self);
-static void irttp_start_todo_timer( struct tsap_cb *self, int timeout);
+static void irttp_flush_queues(struct tsap_cb *self);
+static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
+static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
+static void irttp_start_todo_timer(struct tsap_cb *self, int timeout);
/*
* Function irttp_init (void)
@@ -64,21 +66,19 @@ static void irttp_start_todo_timer( struct tsap_cb *self, int timeout);
*/
__initfunc(int irttp_init(void))
{
- DEBUG( 4, "--> irttp_init\n");
-
/* Initialize the irttp structure. */
- if ( irttp == NULL) {
- irttp = kmalloc( sizeof(struct irttp_cb), GFP_KERNEL);
- if ( irttp == NULL)
+ if (irttp == NULL) {
+ irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL);
+ if (irttp == NULL)
return -ENOMEM;
}
- memset( irttp, 0, sizeof(struct irttp_cb));
-
+ memset(irttp, 0, sizeof(struct irttp_cb));
+
irttp->magic = TTP_MAGIC;
- irttp->tsaps = hashbin_new( HB_LOCAL);
- if ( !irttp->tsaps) {
- printk( KERN_WARNING "IrDA: Can't allocate IrTTP hashbin!\n");
+ irttp->tsaps = hashbin_new(HB_LOCAL);
+ if (!irttp->tsaps) {
+ printk(KERN_WARNING "IrDA: Can't allocate IrTTP hashbin!\n");
return -ENOMEM;
}
@@ -91,69 +91,73 @@ __initfunc(int irttp_init(void))
* Called by module destruction/cleanup code
*
*/
+#ifdef MODULE
void irttp_cleanup(void)
{
- DEBUG( 4, "irttp_cleanup\n");
-
/* Check for main structure */
- ASSERT( irttp != NULL, return;);
- ASSERT( irttp->magic == TTP_MAGIC, return;);
+ ASSERT(irttp != NULL, return;);
+ ASSERT(irttp->magic == TTP_MAGIC, return;);
/*
* Delete hashbin and close all TSAP instances in it
*/
- hashbin_delete( irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);
+ hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap);
- irttp->magic = ~TTP_MAGIC;
+ irttp->magic = 0;
/* De-allocate main structure */
- kfree( irttp);
+ kfree(irttp);
irttp = NULL;
}
+#endif
/*
* Function irttp_open_tsap (stsap, notify)
*
* Create TSAP connection endpoint,
*/
-struct tsap_cb *irttp_open_tsap( __u8 stsap_sel, int credit,
- struct notify_t *notify)
+struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit,
+ struct notify_t *notify)
{
struct notify_t ttp_notify;
struct tsap_cb *self;
struct lsap_cb *lsap;
- ASSERT( irttp != NULL, return NULL;);
- ASSERT( irttp->magic == TTP_MAGIC, return NULL;);
+ ASSERT(irttp != NULL, return NULL;);
+ ASSERT(irttp->magic == TTP_MAGIC, return NULL;);
- self = kmalloc( sizeof(struct tsap_cb), GFP_ATOMIC);
- if ( self == NULL) {
- printk( KERN_ERR "IrTTP: Can't allocate memory for "
- "TSAP control block!\n");
+ self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+ if (self == NULL) {
+ DEBUG(0, __FUNCTION__ "(), unable to kmalloc!\n");
return NULL;
}
- memset( self, 0, sizeof(struct tsap_cb));
+ memset(self, 0, sizeof(struct tsap_cb));
- init_timer( &self->todo_timer);
+ init_timer(&self->todo_timer);
/* Initialize callbacks for IrLMP to use */
-
- irda_notify_init( &ttp_notify);
+ irda_notify_init(&ttp_notify);
ttp_notify.connect_confirm = irttp_connect_confirm;
ttp_notify.connect_indication = irttp_connect_indication;
ttp_notify.disconnect_indication = irttp_disconnect_indication;
ttp_notify.data_indication = irttp_data_indication;
ttp_notify.udata_indication = irttp_udata_indication;
ttp_notify.instance = self;
- strncpy( ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
+ strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
+ self->magic = TTP_TSAP_MAGIC;
+ self->connected = FALSE;
+
+ skb_queue_head_init(&self->rx_queue);
+ skb_queue_head_init(&self->tx_queue);
+ skb_queue_head_init(&self->rx_fragments);
/*
* Create LSAP at IrLMP layer
*/
- lsap = irlmp_open_lsap( stsap_sel, &ttp_notify);
- if ( lsap == NULL) {
- printk( KERN_ERR "IrTTP, Unable to get LSAP!!\n");
+ lsap = irlmp_open_lsap(stsap_sel, &ttp_notify);
+ if (lsap == NULL) {
+ printk(KERN_ERR "IrTTP, Unable to get LSAP!!\n");
return NULL;
}
@@ -163,27 +167,19 @@ struct tsap_cb *irttp_open_tsap( __u8 stsap_sel, int credit,
* the stsap_sel we have might not be valid anymore
*/
self->stsap_sel = lsap->slsap_sel;
- DEBUG( 4, __FUNCTION__ "(), stsap_sel=%02x\n", self->stsap_sel);
+ DEBUG(4, __FUNCTION__ "(), stsap_sel=%02x\n", self->stsap_sel);
self->notify = *notify;
self->lsap = lsap;
- self->magic = TTP_TSAP_MAGIC;
- skb_queue_head_init( &self->rx_queue);
- skb_queue_head_init( &self->tx_queue);
- skb_queue_head_init( &self->rx_fragments);
+ hashbin_insert(irttp->tsaps, (QUEUE *) self, (int) self, NULL);
- /*
- * Insert ourself into the hashbin
- */
- hashbin_insert( irttp->tsaps, (QUEUE *) self, self->stsap_sel, NULL);
-
- if ( credit > TTP_MAX_QUEUE)
+ if (credit > TTP_MAX_QUEUE)
self->initial_credit = TTP_MAX_QUEUE;
else
self->initial_credit = credit;
-
- return self;
+
+ return self;
}
/*
@@ -193,27 +189,20 @@ struct tsap_cb *irttp_open_tsap( __u8 stsap_sel, int credit,
* deallocation of the TSAP, and resetting of the TSAPs values;
*
*/
-static void __irttp_close_tsap( struct tsap_cb *self)
+static void __irttp_close_tsap(struct tsap_cb *self)
{
- DEBUG( 4, __FUNCTION__ "()\n");
-
/* First make sure we're connected. */
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
- irttp_flush_queues( self);
+ irttp_flush_queues(self);
- del_timer( &self->todo_timer);
+ del_timer(&self->todo_timer);
self->connected = FALSE;
self->magic = ~TTP_TSAP_MAGIC;
- /*
- * Deallocate structure
- */
- kfree( self);
-
- DEBUG( 4, "irttp_close_tsap() -->\n");
+ kfree(self);
}
/*
@@ -223,24 +212,41 @@ static void __irttp_close_tsap( struct tsap_cb *self)
* associated with this TSAP
*
*/
-void irttp_close_tsap( struct tsap_cb *self)
+int irttp_close_tsap(struct tsap_cb *self)
{
struct tsap_cb *tsap;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ DEBUG(4, __FUNCTION__ "()\n");
- tsap = hashbin_remove( irttp->tsaps, self->stsap_sel, NULL);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
- ASSERT( tsap == self, return;);
+ /* Make sure tsap has been disconnected */
+ if (self->connected) {
+ /* Check if disconnect is not pending */
+ if (!self->disconnect_pend) {
+ DEBUG(0, __FUNCTION__ "(), TSAP still connected!\n");
+ irttp_disconnect_request(self, NULL, P_NORMAL);
+ }
+ self->close_pend = TRUE;
+ irttp_start_todo_timer(self, 100);
+
+ return 0; /* Will be back! */
+ }
+
+ tsap = hashbin_remove(irttp->tsaps, (int) self, NULL);
+
+ ASSERT(tsap == self, return -1;);
/* Close corresponding LSAP */
- if ( self->lsap) {
- irlmp_close_lsap( self->lsap);
+ if (self->lsap) {
+ irlmp_close_lsap(self->lsap);
self->lsap = NULL;
}
- __irttp_close_tsap( self);
+ __irttp_close_tsap(self);
+
+ return 0;
}
/*
@@ -249,26 +255,26 @@ void irttp_close_tsap( struct tsap_cb *self)
* Send unreliable data on this TSAP
*
*/
-int irttp_udata_request( struct tsap_cb *self, struct sk_buff *skb)
+int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
{
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return -1;);
- ASSERT( skb != NULL, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+ ASSERT(skb != NULL, return -1;);
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
/* Check that nothing bad happens */
- if (( skb->len == 0) || ( !self->connected)) {
- DEBUG( 0, __FUNCTION__ "(), No data, or not connected\n");
+ if ((skb->len == 0) || (!self->connected)) {
+ DEBUG(1, __FUNCTION__ "(), No data, or not connected\n");
return -1;
}
- if ( skb->len > self->max_seg_size) {
- DEBUG( 0, __FUNCTION__ "(), UData is to large for IrLAP!\n");
+ if (skb->len > self->max_seg_size) {
+		DEBUG(1, __FUNCTION__ "(), UData is too large for IrLAP!\n");
return -1;
}
- irlmp_udata_request( self->lsap, skb);
+ irlmp_udata_request(self->lsap, skb);
self->stats.tx_packets++;
return 0;
@@ -280,66 +286,62 @@ int irttp_udata_request( struct tsap_cb *self, struct sk_buff *skb)
* Queue frame for transmission. If SAR is enabled, fragement the frame
* and queue the fragments for transmission
*/
-int irttp_data_request( struct tsap_cb *self, struct sk_buff *skb)
+int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
{
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
-
- ASSERT( self != NULL, return -1;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return -1;);
- ASSERT( skb != NULL, return -1;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+ ASSERT(skb != NULL, return -1;);
/* Check that nothing bad happens */
- if (( skb->len == 0) || ( !self->connected)) {
- DEBUG( 4, __FUNCTION__ "(), No data, or not connected\n");
- return -1;
+ if ((skb->len == 0) || (!self->connected)) {
+ DEBUG(4, __FUNCTION__ "(), No data, or not connected\n");
+ return -ENOTCONN;
}
/*
* Check if SAR is disabled, and the frame is larger than what fits
* inside an IrLAP frame
*/
- if (( self->tx_max_sdu_size == 0) &&
- ( skb->len > self->max_seg_size))
- {
- DEBUG( 0, __FUNCTION__
+ if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
+ DEBUG(1, __FUNCTION__
"(), SAR disabled, and data is to large for IrLAP!\n");
- return -1;
+ return -EMSGSIZE;
}
/*
* Check if SAR is enabled, and the frame is larger than the
* TxMaxSduSize
*/
- if (( self->tx_max_sdu_size != 0) &&
+ if ((self->tx_max_sdu_size != 0) &&
+ (self->tx_max_sdu_size != SAR_UNBOUND) &&
(skb->len > self->tx_max_sdu_size))
{
- DEBUG( 0, __FUNCTION__ "(), SAR enabled, "
+ DEBUG(1, __FUNCTION__ "(), SAR enabled, "
"but data is larger than TxMaxSduSize!\n");
- return -1;
+ return -EMSGSIZE;
}
/*
* Check if transmit queue is full
*/
- if ( skb_queue_len( &self->tx_queue) >= TTP_MAX_QUEUE) {
+ if (skb_queue_len(&self->tx_queue) >= TTP_MAX_QUEUE) {
/*
* Give it a chance to empty itself
*/
- irttp_run_tx_queue( self);
+ irttp_run_tx_queue(self);
- return -1;
+ return -ENOBUFS;
}
/* Queue frame, or queue frame segments */
- if (( self->tx_max_sdu_size == 0) ||
- ( skb->len < self->max_seg_size)) {
+ if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
/* Queue frame */
- frame = skb_push( skb, TTP_HEADER);
+ frame = skb_push(skb, TTP_HEADER);
frame[0] = 0x00; /* Clear more bit */
- DEBUG( 4, __FUNCTION__ "(), queueing original skb\n");
- skb_queue_tail( &self->tx_queue, skb);
+ DEBUG(4, __FUNCTION__ "(), queueing original skb\n");
+ skb_queue_tail(&self->tx_queue, skb);
} else {
/*
* Fragment the frame, this function will also queue the
@@ -347,117 +349,103 @@ int irttp_data_request( struct tsap_cb *self, struct sk_buff *skb)
* queue may be overfilled by all the segments for a little
* while
*/
- irttp_fragment_skb( self, skb);
+ irttp_fragment_skb(self, skb);
}
/* Check if we can accept more data from client */
- if (( !self->tx_sdu_busy) &&
- ( skb_queue_len( &self->tx_queue) > HIGH_THRESHOLD)) {
+ if ((!self->tx_sdu_busy) &&
+ (skb_queue_len(&self->tx_queue) > HIGH_THRESHOLD)) {
/* Tx queue filling up, so stop client */
self->tx_sdu_busy = TRUE;
- if ( self->notify.flow_indication) {
- self->notify.flow_indication( self->notify.instance,
- self,
- FLOW_STOP);
+ if (self->notify.flow_indication) {
+ self->notify.flow_indication(
+ self->notify.instance, self, FLOW_STOP);
}
}
/* Try to make some progress */
- irttp_run_tx_queue( self);
+ irttp_run_tx_queue(self);
return 0;
}
/*
- * Function irttp_xmit (self)
+ * Function irttp_run_tx_queue (self)
*
* If possible, transmit a frame queued for transmission.
*
*/
-static void irttp_run_tx_queue( struct tsap_cb *self)
+static void irttp_run_tx_queue(struct tsap_cb *self)
{
struct sk_buff *skb = NULL;
unsigned long flags;
__u8 *frame;
int n;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
-
- if ( irda_lock( &self->tx_queue_lock) == FALSE)
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+
+ if (irda_lock(&self->tx_queue_lock) == FALSE)
return;
- while (( self->send_credit > 0) && !skb_queue_empty( &self->tx_queue)){
-
- skb = skb_dequeue( &self->tx_queue);
- ASSERT( skb != NULL, return;);
+ while ((self->send_credit > 0) && !skb_queue_empty(&self->tx_queue)) {
+ skb = skb_dequeue(&self->tx_queue);
+ ASSERT(skb != NULL, return;);
/* Make room for TTP header */
- ASSERT( skb_headroom( skb) >= TTP_HEADER, return;);
+ ASSERT(skb_headroom(skb) >= TTP_HEADER, return;);
/*
* Since we can transmit and receive frames concurrently,
* the code below is a critical region and we must assure that
* nobody messes with the credits while we update them.
*/
- save_flags( flags);
- cli();
+ spin_lock_irqsave(&self->lock, flags);
n = self->avail_credit;
self->avail_credit = 0;
- /* Only space for 127 credits in frame */
- if ( n > 127) {
+ /* Only room for 127 credits in frame */
+ if (n > 127) {
self->avail_credit = n-127;
n = 127;
}
self->remote_credit += n;
self->send_credit--;
- restore_flags(flags);
+ spin_unlock_irqrestore(&self->lock, flags);
- DEBUG( 4, "irttp_xmit: Giving away %d credits\n", n);
-
/*
* More bit must be set by the data_request() or fragment()
* functions
*/
frame = skb->data;
- DEBUG( 4, __FUNCTION__ "(), More=%s\n", frame[0] & 0x80 ?
+ DEBUG(4, __FUNCTION__ "(), More=%s\n", frame[0] & 0x80 ?
"TRUE" : "FALSE" );
frame[0] |= (__u8) (n & 0x7f);
- irlmp_data_request( self->lsap, skb);
+ irlmp_data_request(self->lsap, skb);
self->stats.tx_packets++;
/* Check if we can accept more frames from client */
- if (( self->tx_sdu_busy) &&
- ( skb_queue_len( &self->tx_queue) < LOW_THRESHOLD)) {
+ if ((self->tx_sdu_busy) &&
+ (skb_queue_len(&self->tx_queue) < LOW_THRESHOLD))
+ {
self->tx_sdu_busy = FALSE;
- if ( self->notify.flow_indication)
- self->notify.flow_indication( self->notify.instance,
- self,
- FLOW_START);
+ if (self->notify.flow_indication)
+ self->notify.flow_indication(
+ self->notify.instance, self,
+ FLOW_START);
}
}
/* Reset lock */
self->tx_queue_lock = 0;
-
- /* Check if there is any disconnect request pending */
- if ( self->disconnect_pend) {
- if ( self->disconnect_skb) {
- irttp_disconnect_request( self, self->disconnect_skb,
- P_NORMAL);
- self->disconnect_skb = NULL;
- } else
- irttp_disconnect_request( self, NULL, P_NORMAL);
- }
}
/*
@@ -466,80 +454,78 @@ static void irttp_run_tx_queue( struct tsap_cb *self)
* Send a dataless flowdata TTP-PDU and give available credit to peer
* TSAP
*/
-void irttp_give_credit( struct tsap_cb *self)
+void irttp_give_credit(struct tsap_cb *self)
{
struct sk_buff *tx_skb = NULL;
unsigned long flags;
int n;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
- DEBUG( 4, "irttp_give_credit() send=%d,avail=%d,remote=%d\n",
+ DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
self->send_credit, self->avail_credit, self->remote_credit);
/* Give credit to peer */
- tx_skb = dev_alloc_skb( 64);
- if ( tx_skb == NULL) {
- DEBUG( 0, "irttp_give_credit: "
- "Could not allocate an sk_buff of length %d\n", 64);
+ tx_skb = dev_alloc_skb(64);
+ if (!tx_skb)
return;
- }
/* Reserve space for LMP, and LAP header */
- skb_reserve( tx_skb, LMP_HEADER+LAP_HEADER);
+ skb_reserve(tx_skb, LMP_HEADER+LAP_HEADER);
/*
* Since we can transmit and receive frames concurrently,
* the code below is a critical region and we must assure that
* nobody messes with the credits while we update them.
*/
- save_flags( flags);
- cli();
+ spin_lock_irqsave(&self->lock, flags);
n = self->avail_credit;
self->avail_credit = 0;
/* Only space for 127 credits in frame */
- if ( n > 127) {
+ if (n > 127) {
self->avail_credit = n - 127;
n = 127;
}
self->remote_credit += n;
- restore_flags(flags);
+ spin_unlock_irqrestore(&self->lock, flags);
- skb_put( tx_skb, 1);
- tx_skb->data[0] = (__u8) ( n & 0x7f);
+ skb_put(tx_skb, 1);
+ tx_skb->data[0] = (__u8) (n & 0x7f);
- irlmp_data_request( self->lsap, tx_skb);
+ irlmp_data_request(self->lsap, tx_skb);
self->stats.tx_packets++;
}
/*
* Function irttp_udata_indication (instance, sap, skb)
*
- *
+ * Received some unit-data (unreliable)
*
*/
-void irttp_udata_indication( void *instance, void *sap, struct sk_buff *skb)
+static int irttp_udata_indication(void *instance, void *sap,
+ struct sk_buff *skb)
{
struct tsap_cb *self;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
self = (struct tsap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+ ASSERT(skb != NULL, return -1;);
/* Just pass data to layer above */
- if ( self->notify.udata_indication) {
- self->notify.udata_indication( self->notify.instance, self,
- skb);
+ if (self->notify.udata_indication) {
+ self->notify.udata_indication(self->notify.instance, self, skb);
}
self->stats.rx_packets++;
+
+ return 0;
}
/*
@@ -548,7 +534,8 @@ void irttp_udata_indication( void *instance, void *sap, struct sk_buff *skb)
* Receive segment from IrLMP.
*
*/
-void irttp_data_indication( void *instance, void *sap, struct sk_buff *skb)
+static int irttp_data_indication(void *instance, void *sap,
+ struct sk_buff *skb)
{
struct tsap_cb *self;
int more;
@@ -557,17 +544,17 @@ void irttp_data_indication( void *instance, void *sap, struct sk_buff *skb)
self = (struct tsap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
+ ASSERT(skb != NULL, return -1;);
frame = skb->data;
n = frame[0] & 0x7f; /* Extract the credits */
more = frame[0] & 0x80;
- DEBUG( 4, __FUNCTION__"(), got %d credits, TSAP sel=%02x\n",
- n, self->stsap_sel);
+ DEBUG(3, __FUNCTION__"(), got %d credits, TSAP sel=%02x\n",
+ n, self->stsap_sel);
self->stats.rx_packets++;
@@ -575,7 +562,7 @@ void irttp_data_indication( void *instance, void *sap, struct sk_buff *skb)
* Data or dataless frame? Dataless frames only contain the
* TTP_HEADER
*/
- if ( skb->len == 1) {
+ if (skb->len == 1) {
/* Dataless flowdata TTP-PDU */
self->send_credit += n;
} else {
@@ -587,27 +574,28 @@ void irttp_data_indication( void *instance, void *sap, struct sk_buff *skb)
* We don't remove the TTP header, since we must preserve the
* more bit, so the defragment routing knows what to do
*/
- skb_queue_tail( &self->rx_queue, skb);
+ skb_queue_tail(&self->rx_queue, skb);
}
- irttp_run_rx_queue( self);
+ irttp_run_rx_queue(self);
/*
* Give avay some credits to peer?
*/
- if (( skb_queue_empty( &self->tx_queue)) &&
- ( self->remote_credit < LOW_THRESHOLD) &&
- ( self->avail_credit > 0))
+ if ((skb_queue_empty(&self->tx_queue)) &&
+ (self->remote_credit < LOW_THRESHOLD) &&
+ (self->avail_credit > 0))
{
/* Schedule to start immediately after this thread */
- irttp_start_todo_timer( self, 0);
+ irttp_start_todo_timer(self, 0);
}
/* If peer has given us some credites and we didn't have anyone
* from before, the we need to shedule the tx queue?
*/
- if ( self->send_credit == n)
- irttp_start_todo_timer( self, 0);
+ if (self->send_credit == n)
+ irttp_start_todo_timer(self, 0);
+ return 0;
}
/*
@@ -617,26 +605,26 @@ void irttp_data_indication( void *instance, void *sap, struct sk_buff *skb)
* delivering frames if the receive queues are starting to get full, or
* to tell IrTTP to start delivering frames again.
*/
-void irttp_flow_request( struct tsap_cb *self, LOCAL_FLOW flow)
+void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
{
- DEBUG( 0, __FUNCTION__ "()\n");
+ DEBUG(1, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
- switch ( flow) {
+ switch (flow) {
case FLOW_STOP:
- DEBUG( 0, __FUNCTION__ "(), flow stop\n");
+ DEBUG(1, __FUNCTION__ "(), flow stop\n");
self->rx_sdu_busy = TRUE;
break;
case FLOW_START:
- DEBUG( 0, __FUNCTION__ "(), flow start\n");
+ DEBUG(1, __FUNCTION__ "(), flow start\n");
self->rx_sdu_busy = FALSE;
- irttp_run_rx_queue( self);
+ irttp_run_rx_queue(self);
break;
default:
- DEBUG( 0, __FUNCTION__ "(), Unknown flow command!\n");
+ DEBUG(1, __FUNCTION__ "(), Unknown flow command!\n");
}
}
@@ -646,38 +634,36 @@ void irttp_flow_request( struct tsap_cb *self, LOCAL_FLOW flow)
* Try to connect to remote destination TSAP selector
*
*/
-void irttp_connect_request( struct tsap_cb *self, __u8 dtsap_sel, __u32 daddr,
- struct qos_info *qos, int max_sdu_size,
- struct sk_buff *userdata)
+int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
+ __u32 saddr, __u32 daddr,
+ struct qos_info *qos, __u32 max_sdu_size,
+ struct sk_buff *userdata)
{
struct sk_buff *skb;
__u8 *frame;
__u8 n;
- DEBUG( 4, __FUNCTION__ "(), max_sdu_size=%d\n", max_sdu_size);
+ DEBUG(4, __FUNCTION__ "(), max_sdu_size=%d\n", max_sdu_size);
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return -1;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
/* Any userdata supplied? */
- if ( userdata == NULL) {
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__ "Could not allocate an "
- "sk_buff of length %d\n", 64);
- return;
- }
+ if (userdata == NULL) {
+ skb = dev_alloc_skb(64);
+ if (!skb)
+ return -ENOMEM;
/* Reserve space for MUX_CONTROL and LAP header */
- skb_reserve( skb, (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER));
+ skb_reserve(skb, (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER));
} else {
skb = userdata;
/*
* Check that the client has reserved enough space for
* headers
*/
- ASSERT( skb_headroom( userdata) >=
- (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER), return;);
+ ASSERT(skb_headroom(userdata) >=
+ (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER), return -1;);
}
/* Initialize connection parameters */
@@ -696,7 +682,7 @@ void irttp_connect_request( struct tsap_cb *self, __u8 dtsap_sel, __u32 daddr,
/*
* Give away max 127 credits for now
*/
- if ( n > 127) {
+ if (n > 127) {
self->avail_credit=n-127;
n = 127;
}
@@ -704,29 +690,32 @@ void irttp_connect_request( struct tsap_cb *self, __u8 dtsap_sel, __u32 daddr,
self->remote_credit = n;
/* SAR enabled? */
- if ( max_sdu_size > 0) {
- ASSERT( skb_headroom( skb) >=
+ if (max_sdu_size > 0) {
+ ASSERT(skb_headroom(skb) >=
(TTP_HEADER_WITH_SAR+LMP_CONTROL_HEADER+LAP_HEADER),
- return;);
+ return -1;);
/* Insert SAR parameters */
- frame = skb_push( skb, TTP_HEADER_WITH_SAR);
+ frame = skb_push(skb, TTP_HEADER_WITH_SAR);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
frame[2] = 0x01; /* MaxSduSize */
frame[3] = 0x02; /* Value length */
- *((__u16 *) (frame+4))= htons( max_sdu_size); /* Big endian! */
+
+ put_unaligned(cpu_to_be16((__u16) max_sdu_size),
+ (__u16 *)(frame+4));
} else {
/* Insert plain TTP header */
- frame = skb_push( skb, TTP_HEADER);
+ frame = skb_push(skb, TTP_HEADER);
/* Insert initial credit in frame */
frame[0] = n & 0x7f;
}
/* Connect with IrLMP. No QoS parameters for now */
- irlmp_connect_request( self->lsap, dtsap_sel, daddr, qos, skb);
+ return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
+ skb);
}
/*
@@ -735,24 +724,22 @@ void irttp_connect_request( struct tsap_cb *self, __u8 dtsap_sel, __u32 daddr,
* Sevice user confirms TSAP connection with peer.
*
*/
-void irttp_connect_confirm( void *instance, void *sap, struct qos_info *qos,
- int max_seg_size, struct sk_buff *skb)
+void irttp_connect_confirm(void *instance, void *sap, struct qos_info *qos,
+ __u32 max_seg_size, struct sk_buff *skb)
{
struct tsap_cb *self;
+ int parameters;
__u8 *frame;
+ __u8 plen, pi, pl;
__u8 n;
- int parameters;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
self = (struct tsap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
-
- /* FIXME: just remove this when we know its working */
- ASSERT( max_seg_size == qos->data_size.value, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
self->max_seg_size = max_seg_size-LMP_HEADER-LAP_HEADER;
@@ -760,40 +747,59 @@ void irttp_connect_confirm( void *instance, void *sap, struct qos_info *qos,
* Check if we have got some QoS parameters back! This should be the
* negotiated QoS for the link.
*/
- if ( qos) {
- DEBUG( 4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
+ if (qos) {
+ DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
qos->baud_rate.bits);
- DEBUG( 4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
+ DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
qos->baud_rate.value);
}
frame = skb->data;
n = frame[0] & 0x7f;
- DEBUG( 4, __FUNCTION__ "(), Initial send_credit=%d\n", n);
+ DEBUG(4, __FUNCTION__ "(), Initial send_credit=%d\n", n);
self->send_credit = n;
self->tx_max_sdu_size = 0;
self->connected = TRUE;
parameters = frame[0] & 0x80;
- if ( parameters) {
- DEBUG( 4, __FUNCTION__ "(), Contains parameters!\n");
-
- self->tx_max_sdu_size = ntohs(*(__u16 *)(frame+4));
- DEBUG( 4, __FUNCTION__ "(), RxMaxSduSize=%d\n",
- self->tx_max_sdu_size);
+ if (parameters) {
+ plen = frame[1];
+ pi = frame[2];
+ pl = frame[3];
+
+ switch (pl) {
+ case 1:
+ self->tx_max_sdu_size = *(frame+4);
+ break;
+ case 2:
+ self->tx_max_sdu_size =
+ be16_to_cpu(get_unaligned((__u16 *)(frame+4)));
+ break;
+ case 4:
+ self->tx_max_sdu_size =
+ be32_to_cpu(get_unaligned((__u32 *)(frame+4)));
+ break;
+ default:
+ printk(KERN_ERR __FUNCTION__
+ "() illegal value length for max_sdu_size!\n");
+ self->tx_max_sdu_size = 0;
+ };
+
+ DEBUG(4, __FUNCTION__ "(), RxMaxSduSize=%d\n",
+ self->tx_max_sdu_size);
}
- DEBUG( 4, "irttp_connect_confirm() send=%d,avail=%d,remote=%d\n",
- self->send_credit, self->avail_credit, self->remote_credit);
+ DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
+ self->send_credit, self->avail_credit, self->remote_credit);
- skb_pull( skb, TTP_HEADER);
+ skb_pull(skb, TTP_HEADER);
- if ( self->notify.connect_confirm) {
- self->notify.connect_confirm( self->notify.instance, self,
- qos, self->tx_max_sdu_size,
- skb);
+ if (self->notify.connect_confirm) {
+ self->notify.connect_confirm(self->notify.instance, self,
+ qos, self->tx_max_sdu_size,
+ skb);
}
}
@@ -803,32 +809,31 @@ void irttp_connect_confirm( void *instance, void *sap, struct qos_info *qos,
* Some other device is connecting to this TSAP
*
*/
-void irttp_connect_indication( void *instance, void *sap,
- struct qos_info *qos, int max_seg_size,
- struct sk_buff *skb)
+void irttp_connect_indication(void *instance, void *sap,
+ struct qos_info *qos, __u32 max_seg_size,
+ struct sk_buff *skb)
{
struct tsap_cb *self;
- __u8 *frame;
+ struct lsap_cb *lsap;
int parameters;
- int n;
+ __u8 *frame;
+ __u8 plen, pi, pl;
+ __u8 n;
self = (struct tsap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
- /* FIXME: just remove this when we know its working */
- ASSERT( max_seg_size == qos->data_size.value, return;);
+ lsap = (struct lsap_cb *) sap;
self->max_seg_size = max_seg_size-LMP_HEADER-LAP_HEADER;
- DEBUG( 4, "irttp_connect_indication(), TSAP sel=%02x\n",
- self->stsap_sel);
+ DEBUG(4, __FUNCTION__ "(), TSAP sel=%02x\n", self->stsap_sel);
- /* FIXME: Need to update dtsap_sel if its equal to LSAP_ANY */
-/* if ( self->dtsap_sel == LSAP_ANY) */
-/* self->dtsap_sel = lsap->dlsap_sel; */
+	/* Need to update dtsap_sel if it's equal to LSAP_ANY */
+ self->dtsap_sel = lsap->dlsap_sel;
frame = skb->data;
n = frame[0] & 0x7f;
@@ -837,22 +842,43 @@ void irttp_connect_indication( void *instance, void *sap,
self->tx_max_sdu_size = 0;
parameters = frame[0] & 0x80;
- if ( parameters) {
- DEBUG( 4, __FUNCTION__ "(), Contains parameters!\n");
-
- self->tx_max_sdu_size = ntohs(*(__u16 *)(frame+4));
- DEBUG( 4, __FUNCTION__ "(), MaxSduSize=%d\n",
- self->tx_max_sdu_size);
+ if (parameters) {
+ DEBUG(3, __FUNCTION__ "(), Contains parameters!\n");
+ plen = frame[1];
+ pi = frame[2];
+ pl = frame[3];
+
+ switch (pl) {
+ case 1:
+ self->tx_max_sdu_size = *(frame+4);
+ break;
+ case 2:
+ self->tx_max_sdu_size =
+ be16_to_cpu(get_unaligned((__u16 *)(frame+4)));
+ break;
+ case 4:
+ self->tx_max_sdu_size =
+ be32_to_cpu(get_unaligned((__u32 *)(frame+4)));
+ break;
+ default:
+ printk(KERN_ERR __FUNCTION__
+ "() illegal value length for max_sdu_size!\n");
+ self->tx_max_sdu_size = 0;
+ };
+
+
+ DEBUG(3, __FUNCTION__ "(), MaxSduSize=%d\n",
+ self->tx_max_sdu_size);
}
- DEBUG( 4, "irttp_connect_indication: initial send_credit=%d\n", n);
+ DEBUG(4, __FUNCTION__ "(), initial send_credit=%d\n", n);
- skb_pull( skb, 1);
+ skb_pull(skb, 1); /* Remove TTP header */
- if ( self->notify.connect_indication) {
- self->notify.connect_indication( self->notify.instance, self,
- qos, self->rx_max_sdu_size,
- skb);
+ if (self->notify.connect_indication) {
+ self->notify.connect_indication(self->notify.instance, self,
+ qos, self->rx_max_sdu_size,
+ skb);
}
}
@@ -863,37 +889,34 @@ void irttp_connect_indication( void *instance, void *sap,
* IrLMP!
*
*/
-void irttp_connect_response( struct tsap_cb *self, int max_sdu_size,
- struct sk_buff *userdata)
+void irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
+ struct sk_buff *userdata)
{
struct sk_buff *skb;
__u8 *frame;
__u8 n;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
- DEBUG( 4, __FUNCTION__ "(), Source TSAP selector=%02x\n",
- self->stsap_sel);
+ DEBUG(4, __FUNCTION__ "(), Source TSAP selector=%02x\n",
+ self->stsap_sel);
/* Any userdata supplied? */
- if ( userdata == NULL) {
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__ "Could not allocate an "
- "sk_buff of length %d\n", 64);
+ if (userdata == NULL) {
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
/* Reserve space for MUX_CONTROL and LAP header */
- skb_reserve( skb, (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER));
+ skb_reserve(skb, (TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER));
} else {
skb = userdata;
/*
* Check that the client has reserved enough space for
* headers
*/
- ASSERT( skb_headroom( skb) >=
+ ASSERT(skb_headroom(skb) >=
(TTP_HEADER+LMP_CONTROL_HEADER+LAP_HEADER), return;);
}
@@ -906,7 +929,7 @@ void irttp_connect_response( struct tsap_cb *self, int max_sdu_size,
n = self->initial_credit;
/* Frame has only space for max 127 credits (7 bits) */
- if ( n > 127) {
+ if (n > 127) {
self->avail_credit = n - 127;
n = 127;
}
@@ -915,117 +938,152 @@ void irttp_connect_response( struct tsap_cb *self, int max_sdu_size,
self->connected = TRUE;
/* SAR enabled? */
- if ( max_sdu_size > 0) {
- ASSERT( skb_headroom( skb) >=
- (TTP_HEADER_WITH_SAR+LMP_CONTROL_HEADER+LAP_HEADER),
- return;);
+ if (max_sdu_size > 0) {
+ ASSERT(skb_headroom(skb) >=
+ (TTP_HEADER_WITH_SAR+LMP_CONTROL_HEADER+LAP_HEADER),
+ return;);
/* Insert TTP header with SAR parameters */
- frame = skb_push( skb, TTP_HEADER_WITH_SAR);
+ frame = skb_push(skb, TTP_HEADER_WITH_SAR);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
frame[2] = 0x01; /* MaxSduSize */
frame[3] = 0x02; /* Value length */
- *((__u16 *) (frame+4))= htons( max_sdu_size);
+
+ put_unaligned(cpu_to_be16((__u16) max_sdu_size),
+ (__u16 *)(frame+4));
} else {
/* Insert TTP header */
- frame = skb_push( skb, TTP_HEADER);
+ frame = skb_push(skb, TTP_HEADER);
frame[0] = n & 0x7f;
}
- irlmp_connect_response( self->lsap, skb);
+ irlmp_connect_response(self->lsap, skb);
+}
+
+/*
+ * Function irttp_dup (self, instance)
+ *
+ * Duplicate TSAP, can be used by servers to confirm a connection on a
+ * new TSAP so it can keep listening on the old one.
+ */
+struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
+{
+ struct tsap_cb *new;
+
+ DEBUG(1, __FUNCTION__ "()\n");
+
+ if (!hashbin_find(irttp->tsaps, (int) orig, NULL)) {
+ DEBUG(0, __FUNCTION__ "(), unable to find TSAP\n");
+ return NULL;
+ }
+ new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+ if (!new) {
+ DEBUG(0, __FUNCTION__ "(), unable to kmalloc\n");
+ return NULL;
+ }
+ /* Dup */
+ memcpy(new, orig, sizeof(struct tsap_cb));
+ new->notify.instance = instance;
+ new->lsap = irlmp_dup(orig->lsap, new);
+
+ /* Not everything should be copied */
+ init_timer(&new->todo_timer);
+
+ skb_queue_head_init(&new->rx_queue);
+ skb_queue_head_init(&new->tx_queue);
+ skb_queue_head_init(&new->rx_fragments);
+
+ hashbin_insert(irttp->tsaps, (QUEUE *) new, (int) new, NULL);
+
+ return new;
}
/*
- * Function irttp_disconnect_request ( self)
+ * Function irttp_disconnect_request (self)
*
* Close this connection please! If priority is high, the queued data
* segments, if any, will be deallocated first
*
*/
-void irttp_disconnect_request( struct tsap_cb *self, struct sk_buff *userdata,
- int priority)
+void irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
+ int priority)
{
struct sk_buff *skb;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(2, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
/* Already disconnected? */
- if ( !self->connected) {
- DEBUG( 4, __FUNCTION__ "(), already disconnected!\n");
+ if (!self->connected) {
+ DEBUG(4, __FUNCTION__ "(), already disconnected!\n");
return;
}
/* Disconnect already pending? */
- if ( self->disconnect_pend) {
- DEBUG( 0, __FUNCTION__ "(), disconnect already pending\n");
- if ( userdata) {
- dev_kfree_skb( userdata);
+ if (self->disconnect_pend) {
+ DEBUG(1, __FUNCTION__ "(), disconnect already pending\n");
+ if (userdata) {
+ dev_kfree_skb(userdata);
}
/* Try to make some progress */
- irttp_run_rx_queue( self);
+ irttp_run_rx_queue(self);
return;
}
/*
* Check if there are still data segments in the transmit queue
*/
- if ( skb_queue_len( &self->tx_queue) > 0) {
- if ( priority == P_HIGH) {
- DEBUG( 0, __FUNCTION__ "High priority!!()\n" );
+ if (skb_queue_len(&self->tx_queue) > 0) {
+ if (priority == P_HIGH) {
+ DEBUG(1, __FUNCTION__ "High priority!!()\n" );
/*
* No need to send the queued data, if we are
* disconnecting right now since the data will
* not have any usable connection to be sent on
*/
- irttp_flush_queues( self);
- } else if ( priority == P_NORMAL) {
+ irttp_flush_queues(self);
+ } else if (priority == P_NORMAL) {
/*
* Must delay disconnect until after all data segments
* have been sent and the tx_queue is empty
*/
- if ( userdata)
+ if (userdata)
self->disconnect_skb = userdata;
else
self->disconnect_skb = NULL;
self->disconnect_pend = TRUE;
- irttp_run_tx_queue( self);
- /*
- * irttp_xmit will call us again when the tx_queue
- * is empty
- */
+ irttp_run_tx_queue(self);
+
+ irttp_start_todo_timer(self, 100);
return;
}
}
- DEBUG( 0, __FUNCTION__ "(), Disconnecting ...\n");
+ DEBUG(1, __FUNCTION__ "(), Disconnecting ...\n");
self->connected = FALSE;
- if ( !userdata) {
- skb = dev_alloc_skb( 64);
- if (skb == NULL) {
- DEBUG( 0, __FUNCTION__ "(), Could not allocate an "
- "sk_buff of length %d\n", 64);
+ if (!userdata) {
+ skb = dev_alloc_skb(64);
+ if (!skb)
return;
- }
-
+
/*
* Reserve space for MUX and LAP header
*/
- skb_reserve( skb, LMP_CONTROL_HEADER+LAP_HEADER);
-
+ skb_reserve(skb, LMP_CONTROL_HEADER+LAP_HEADER);
+
userdata = skb;
}
- irlmp_disconnect_request( self->lsap, userdata);
+ irlmp_disconnect_request(self->lsap, userdata);
}
/*
@@ -1034,26 +1092,58 @@ void irttp_disconnect_request( struct tsap_cb *self, struct sk_buff *userdata,
* Disconnect indication, TSAP disconnected by peer?
*
*/
-void irttp_disconnect_indication( void *instance, void *sap, LM_REASON reason,
- struct sk_buff *userdata)
+void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason,
+ struct sk_buff *userdata)
{
struct tsap_cb *self;
- DEBUG( 4, "irttp_disconnect_indication()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- self = ( struct tsap_cb *) instance;
+ self = (struct tsap_cb *) instance;
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
self->connected = FALSE;
- /*
- * Use callback to notify layer above
+ if (!self->notify.disconnect_indication)
+ return;
+
+ self->notify.disconnect_indication(self->notify.instance, self, reason,
+ userdata);
+}
+
+/*
+ * Function irttp_do_data_indication (self, skb)
+ *
+ * Try to deliver the reassembled skb to the layer above, and requeue it
+ * if that for some reason should fail. We mark the rx sdu as busy to
+ * apply back pressure if necessary.
+ */
+void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
+{
+ int err;
+
+ err = self->notify.data_indication(self->notify.instance, self, skb);
+
+ /* Usually the layer above will notify that its input queue is
+ * starting to get filled by using the flow request, but this may
+ * be difficult, so it can instead just refuse to eat it and
+ * give an error back
*/
- if ( self->notify.disconnect_indication)
- self->notify.disconnect_indication( self->notify.instance,
- self, reason, userdata);
+ if (err == -ENOMEM) {
+ DEBUG(0, __FUNCTION__ "() requeueing skb!\n");
+
+ /* Make sure we take a break */
+ self->rx_sdu_busy = TRUE;
+
+ /* Need to push the header in again */
+ skb_push(skb, TTP_HEADER);
+ skb->data[0] = 0x00; /* Make sure MORE bit is cleared */
+
+ /* Put skb back on queue */
+ skb_queue_head(&self->rx_queue, skb);
+ }
}
/*
@@ -1062,43 +1152,27 @@ void irttp_disconnect_indication( void *instance, void *sap, LM_REASON reason,
* Check if we have any frames to be transmitted, or if we have any
* available credit to give away.
*/
-void irttp_run_rx_queue( struct tsap_cb *self)
+void irttp_run_rx_queue(struct tsap_cb *self)
{
struct sk_buff *skb;
- __u8 *frame;
int more = 0;
- void *instance;
-
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
-
- instance = self->notify.instance;
- ASSERT( instance != NULL, return;);
- DEBUG( 4, "irttp_do_events() send=%d,avail=%d,remote=%d\n",
+ DEBUG(4, __FUNCTION__ "() send=%d,avail=%d,remote=%d\n",
self->send_credit, self->avail_credit, self->remote_credit);
- if ( irda_lock( &self->rx_queue_lock) == FALSE)
+ if (irda_lock(&self->rx_queue_lock) == FALSE)
return;
/*
- * Process receive queue
+ * Reassemble all frames in receive queue and deliver them
*/
- while (( !skb_queue_empty( &self->rx_queue)) && !self->rx_sdu_busy) {
-
- skb = skb_dequeue( &self->rx_queue);
- if ( !skb)
- break; /* Should not happend, but ... */
-
+ while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
self->avail_credit++;
- frame = skb->data;
- more = frame[0] & 0x80;
- DEBUG( 4, __FUNCTION__ "(), More=%s\n", more ? "TRUE" :
- "FALSE");
+ more = skb->data[0] & 0x80;
/* Remove TTP header */
- skb_pull( skb, TTP_HEADER);
+ skb_pull(skb, TTP_HEADER);
/* Add the length of the remaining data */
self->rx_sdu_size += skb->len;
@@ -1109,57 +1183,61 @@ void irttp_run_rx_queue( struct tsap_cb *self)
* immediately. This can be requested by clients that
* implement byte streams without any message boundaries
*/
- if ((self->no_defrag) || (self->rx_max_sdu_size == 0)) {
- self->notify.data_indication( instance, self, skb);
+ if (self->rx_max_sdu_size == SAR_DISABLE) {
+ irttp_do_data_indication(self, skb);
self->rx_sdu_size = 0;
continue;
}
/* Check if this is a fragment, and not the last fragment */
- if ( more) {
+ if (more) {
/*
* Queue the fragment if we still are within the
* limits of the maximum size of the rx_sdu
*/
- if ( self->rx_sdu_size <= self->rx_max_sdu_size) {
- DEBUG( 4, __FUNCTION__
- "(), queueing fragment\n");
-
- skb_queue_tail( &self->rx_fragments, skb);
+ if (self->rx_sdu_size <= self->rx_max_sdu_size) {
+ DEBUG(4, __FUNCTION__ "(), queueing frag\n");
+ skb_queue_tail(&self->rx_fragments, skb);
} else {
- DEBUG( 0, __FUNCTION__ "(), Error!\n");
+ /* Free the part of the SDU that is too big */
+ dev_kfree_skb(skb);
}
- } else {
- /*
- * This is the last fragment, so time to reassemble!
+ continue;
+ }
+ /*
+ * This is the last fragment, so time to reassemble!
+ */
+ if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
+ (self->rx_max_sdu_size == SAR_UNBOUND))
+ {
+ /*
+ * A little optimization: only queue the fragment if
+ * there are other fragments, since if this is the
+ * last and only fragment, there is no need to
+ * reassemble :-)
*/
- if ( self->rx_sdu_size <= self->rx_max_sdu_size) {
-
- /* A little optimizing. Only queue the
- * fragment if there is other fragments. Since
- * if this is the last and only fragment,
- * there is no need to reassemble
- */
- if ( !skb_queue_empty( &self->rx_fragments)) {
-
- DEBUG( 4, __FUNCTION__
- "(), queueing fragment\n");
- skb_queue_tail( &self->rx_fragments,
- skb);
-
- skb = irttp_reassemble_skb( self);
- }
- self->notify.data_indication( instance, self,
- skb);
- } else {
- DEBUG( 0, __FUNCTION__
- "(), Truncated frame\n");
- self->notify.data_indication(
- self->notify.instance, self, skb);
+ if (!skb_queue_empty(&self->rx_fragments)) {
+ skb_queue_tail(&self->rx_fragments,
+ skb);
+
+ skb = irttp_reassemble_skb(self);
}
- self->rx_sdu_size = 0;
+
+ /* Now we can deliver the reassembled skb */
+ irttp_do_data_indication(self, skb);
+ } else {
+ DEBUG(1, __FUNCTION__ "(), Truncated frame\n");
+
+ /* Free the part of the SDU that is too big */
+ dev_kfree_skb(skb);
+
+ /* Deliver only the valid but truncated part of SDU */
+ skb = irttp_reassemble_skb(self);
+
+ irttp_do_data_indication(self, skb);
}
+ self->rx_sdu_size = 0;
}
/* Reset lock */
self->rx_queue_lock = 0;
@@ -1170,28 +1248,26 @@ void irttp_run_rx_queue( struct tsap_cb *self)
*
* Flushes (removes all frames from) the transit buffer (tx_list)
*/
-void irttp_flush_queues( struct tsap_cb *self)
+void irttp_flush_queues(struct tsap_cb *self)
{
struct sk_buff* skb;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
/* Deallocate frames waiting to be sent */
- while (( skb = skb_dequeue( &self->tx_queue)) != NULL) {
- dev_kfree_skb( skb);
- }
+ while ((skb = skb_dequeue(&self->tx_queue)) != NULL)
+ dev_kfree_skb(skb);
+
/* Deallocate received frames */
- while (( skb = skb_dequeue( &self->rx_queue)) != NULL) {
- dev_kfree_skb( skb);
- }
- /* Deallocate received fragments */
- while (( skb = skb_dequeue( &self->rx_fragments)) != NULL) {
- dev_kfree_skb( skb);
- }
+ while ((skb = skb_dequeue(&self->rx_queue)) != NULL)
+ dev_kfree_skb(skb);
+ /* Deallocate received fragments */
+ while ((skb = skb_dequeue(&self->rx_fragments)) != NULL)
+ dev_kfree_skb(skb);
}
/*
@@ -1201,40 +1277,43 @@ void irttp_flush_queues( struct tsap_cb *self)
* queue
*
*/
-static struct sk_buff *irttp_reassemble_skb( struct tsap_cb *self)
+static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self)
{
struct sk_buff *skb, *frag;
int n = 0; /* Fragment index */
+
+ ASSERT(self != NULL, return NULL;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;);
- ASSERT( self != NULL, return NULL;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return NULL;);
-
- DEBUG( 4, __FUNCTION__ "(), self->rx_sdu_size=%d\n",
- self->rx_sdu_size);
+ DEBUG(4, __FUNCTION__ "(), self->rx_sdu_size=%d\n",
+ self->rx_sdu_size);
- skb = dev_alloc_skb( self->rx_sdu_size);
- if ( !skb) {
- DEBUG( 0, __FUNCTION__ "(), unable to allocate skb\n");
+ skb = dev_alloc_skb(self->rx_sdu_size);
+ if (!skb)
return NULL;
- }
- skb_put( skb, self->rx_sdu_size);
+ /*
+ * Need to reserve space for the TTP header in case this skb needs
+ * to be requeued should delivery fail
+ */
+ skb_reserve(skb, TTP_HEADER);
+ skb_put(skb, self->rx_sdu_size);
/*
* Copy all fragments to a new buffer
*/
- while (( frag = skb_dequeue( &self->rx_fragments)) != NULL) {
- memcpy( skb->data+n, frag->data, frag->len);
+ while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
+ memcpy(skb->data+n, frag->data, frag->len);
n += frag->len;
- dev_kfree_skb( frag);
+ dev_kfree_skb(frag);
}
- DEBUG( 4, __FUNCTION__ "(), frame len=%d\n", n);
+ DEBUG(4, __FUNCTION__ "(), frame len=%d\n", n);
/* Set the new length */
- DEBUG( 4, __FUNCTION__ "(), rx_sdu_size=%d\n", self->rx_sdu_size);
- ASSERT( n <= self->rx_sdu_size, return NULL;);
- skb_trim( skb, n);
+ DEBUG(4, __FUNCTION__ "(), rx_sdu_size=%d\n", self->rx_sdu_size);
+ ASSERT(n <= self->rx_sdu_size, return NULL;);
+ skb_trim(skb, n);
self->rx_sdu_size = 0;
@@ -1247,60 +1326,57 @@ static struct sk_buff *irttp_reassemble_skb( struct tsap_cb *self)
* Fragments a frame and queues all the fragments for transmission
*
*/
-static void irttp_fragment_skb( struct tsap_cb *self, struct sk_buff *skb)
+static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb)
{
struct sk_buff *frag;
__u8 *frame;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
- ASSERT( skb != NULL, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(skb != NULL, return;);
/*
* Split frame into a number of segments
*/
- while ( skb->len > 0) {
+ while (skb->len > 0) {
/*
* Instead of making the last segment, we just
* queue what is left of the original skb
*/
- if ( skb->len < self->max_seg_size) {
- DEBUG( 4, __FUNCTION__
+ if (skb->len < self->max_seg_size) {
+ DEBUG(4, __FUNCTION__
"(), queuing last segment\n");
- frame = skb_push( skb, TTP_HEADER);
+ frame = skb_push(skb, TTP_HEADER);
frame[0] = 0x00; /* Clear more bit */
- skb_queue_tail( &self->tx_queue, skb);
+ skb_queue_tail(&self->tx_queue, skb);
return;
}
/* Make new segment */
- frag = dev_alloc_skb( self->max_seg_size+
- TTP_HEADER+LMP_HEADER+
- LAP_HEADER);
- if ( frag == NULL) {
- DEBUG( 0, __FUNCTION__
- "(), Couldn't allocate skbuff!\n");
+ frag = dev_alloc_skb(self->max_seg_size+
+ TTP_HEADER+LMP_HEADER+
+ LAP_HEADER);
+ if (!frag)
return;
- }
- skb_reserve( frag, LMP_HEADER+LAP_HEADER);
+ skb_reserve(frag, LMP_HEADER+LAP_HEADER);
/*
* Copy data from the original skb into this fragment. We
* first insert the TTP header with the more bit set
*/
- frame = skb_put( frag, self->max_seg_size+TTP_HEADER);
+ frame = skb_put(frag, self->max_seg_size+TTP_HEADER);
frame[0] = TTP_MORE;
- memcpy( frag->data+1, skb->data, self->max_seg_size);
+ memcpy(frag->data+1, skb->data, self->max_seg_size);
/* Hide the copied data from the original skb */
- skb_pull( skb, self->max_seg_size);
+ skb_pull(skb, self->max_seg_size);
- skb_queue_tail( &self->tx_queue, frag);
+ skb_queue_tail(&self->tx_queue, frag);
}
}
@@ -1310,31 +1386,54 @@ static void irttp_fragment_skb( struct tsap_cb *self, struct sk_buff *skb)
* Todo timer has expired!
*
*/
-static void irttp_todo_expired( unsigned long data)
+static void irttp_todo_expired(unsigned long data)
{
- struct tsap_cb *self = ( struct tsap_cb *) data;
+ struct tsap_cb *self = (struct tsap_cb *) data;
- DEBUG( 4, __FUNCTION__ "()\n");
+ DEBUG(4, __FUNCTION__ "()\n");
/* Check that we still exist */
- if ( !self || self->magic != TTP_TSAP_MAGIC) {
+ if (!self || self->magic != TTP_TSAP_MAGIC) {
return;
}
- irttp_run_rx_queue( self);
- irttp_run_tx_queue( self);
+ irttp_run_rx_queue(self);
+ irttp_run_tx_queue(self);
/* Give away some credits to peer? */
- if (( skb_queue_empty( &self->tx_queue)) &&
- ( self->remote_credit < LOW_THRESHOLD) &&
- ( self->avail_credit > 0))
+ if ((skb_queue_empty(&self->tx_queue)) &&
+ (self->remote_credit < LOW_THRESHOLD) &&
+ (self->avail_credit > 0))
{
- DEBUG( 4, "irttp_do_events: sending credit!\n");
- irttp_give_credit( self);
+ DEBUG(4, __FUNCTION__ "(), sending credit!\n");
+ irttp_give_credit(self);
}
-
- /* Rearm! */
- /* irttp_start_todo_timer( self, 50); */
+
+ /* Check if time for disconnect */
+ if (self->disconnect_pend) {
+ /* Check if it's possible to disconnect yet */
+ if (skb_queue_empty(&self->tx_queue)) {
+
+ /* Make sure disconnect is not pending anymore */
+ self->disconnect_pend = FALSE;
+ if (self->disconnect_skb) {
+ irttp_disconnect_request(
+ self, self->disconnect_skb, P_NORMAL);
+ self->disconnect_skb = NULL;
+ } else
+ irttp_disconnect_request(self, NULL, P_NORMAL);
+ } else {
+ /* Try again later */
+ irttp_start_todo_timer(self, 100);
+
+ /* No reason to try and close now */
+ return;
+ }
+ }
+
+ /* Check if it's closing time */
+ if (self->close_pend)
+ irttp_close_tsap(self);
}
/*
@@ -1343,18 +1442,18 @@ static void irttp_todo_expired( unsigned long data)
* Start todo timer.
*
*/
-static void irttp_start_todo_timer( struct tsap_cb *self, int timeout)
+static void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
{
- ASSERT( self != NULL, return;);
- ASSERT( self->magic == TTP_TSAP_MAGIC, return;);
+ ASSERT(self != NULL, return;);
+ ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
- del_timer( &self->todo_timer);
+ del_timer(&self->todo_timer);
self->todo_timer.data = (unsigned long) self;
self->todo_timer.function = &irttp_todo_expired;
self->todo_timer.expires = jiffies + timeout;
- add_timer( &self->todo_timer);
+ add_timer(&self->todo_timer);
}
#ifdef CONFIG_PROC_FS
@@ -1363,65 +1462,64 @@ static void irttp_start_todo_timer( struct tsap_cb *self, int timeout)
*
* Give some info to the /proc file system
*/
-int irttp_proc_read( char *buf, char **start, off_t offset, int len,
- int unused)
+int irttp_proc_read(char *buf, char **start, off_t offset, int len, int unused)
{
struct tsap_cb *self;
unsigned long flags;
int i = 0;
- ASSERT( irttp != NULL, return 0;);
+ ASSERT(irttp != NULL, return 0;);
len = 0;
save_flags(flags);
cli();
- self = ( struct tsap_cb *) hashbin_get_first( irttp->tsaps);
- while ( self != NULL) {
- if ( !self || self->magic != TTP_TSAP_MAGIC) {
- DEBUG( 0, "irttp_proc_read: bad ptr self\n");
+ self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
+ while (self != NULL) {
+ if (!self || self->magic != TTP_TSAP_MAGIC) {
+ DEBUG(1, "irttp_proc_read: bad ptr self\n");
return len;
}
- len += sprintf( buf+len, "TSAP %d, ", i++);
- len += sprintf( buf+len, "stsap_sel: %02x, ",
+ len += sprintf(buf+len, "TSAP %d, ", i++);
+ len += sprintf(buf+len, "stsap_sel: %02x, ",
self->stsap_sel);
- len += sprintf( buf+len, "dtsap_sel: %02x\n",
+ len += sprintf(buf+len, "dtsap_sel: %02x\n",
self->dtsap_sel);
- len += sprintf( buf+len, " connected: %s, ",
+ len += sprintf(buf+len, " connected: %s, ",
self->connected? "TRUE":"FALSE");
- len += sprintf( buf+len, "avail credit: %d, ",
+ len += sprintf(buf+len, "avail credit: %d, ",
self->avail_credit);
- len += sprintf( buf+len, "remote credit: %d, ",
+ len += sprintf(buf+len, "remote credit: %d, ",
self->remote_credit);
- len += sprintf( buf+len, "send credit: %d\n",
+ len += sprintf(buf+len, "send credit: %d\n",
self->send_credit);
- len += sprintf( buf+len, " tx packets: %d, ",
+ len += sprintf(buf+len, " tx packets: %d, ",
self->stats.tx_packets);
- len += sprintf( buf+len, "rx packets: %d, ",
+ len += sprintf(buf+len, "rx packets: %d, ",
self->stats.rx_packets);
- len += sprintf( buf+len, "tx_queue len: %d ",
- skb_queue_len( &self->tx_queue));
- len += sprintf( buf+len, "rx_queue len: %d\n",
- skb_queue_len( &self->rx_queue));
- len += sprintf( buf+len, " tx_sdu_busy: %s, ",
+ len += sprintf(buf+len, "tx_queue len: %d ",
+ skb_queue_len(&self->tx_queue));
+ len += sprintf(buf+len, "rx_queue len: %d\n",
+ skb_queue_len(&self->rx_queue));
+ len += sprintf(buf+len, " tx_sdu_busy: %s, ",
self->tx_sdu_busy? "TRUE":"FALSE");
- len += sprintf( buf+len, "rx_sdu_busy: %s\n",
+ len += sprintf(buf+len, "rx_sdu_busy: %s\n",
self->rx_sdu_busy? "TRUE":"FALSE");
- len += sprintf( buf+len, " max_seg_size: %d, ",
+ len += sprintf(buf+len, " max_seg_size: %d, ",
self->max_seg_size);
- len += sprintf( buf+len, "tx_max_sdu_size: %d, ",
+ len += sprintf(buf+len, "tx_max_sdu_size: %d, ",
self->tx_max_sdu_size);
- len += sprintf( buf+len, "rx_max_sdu_size: %d\n",
+ len += sprintf(buf+len, "rx_max_sdu_size: %d\n",
self->rx_max_sdu_size);
- len += sprintf( buf+len, " Used by (%s)\n",
+ len += sprintf(buf+len, " Used by (%s)\n",
self->notify.name);
- len += sprintf( buf+len, "\n");
+ len += sprintf(buf+len, "\n");
- self = ( struct tsap_cb *) hashbin_get_next( irttp->tsaps);
+ self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps);
}
restore_flags(flags);
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 215666aa0..7b226dfa6 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: qos.c
- * Version: 0.1
+ * Version: 0.8
* Description: IrLAP QoS negotiation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Sep 9 00:00:26 1997
- * Modified at: Sat Dec 12 12:21:42 1998
+ * Modified at: Mon Apr 12 11:49:24 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
@@ -70,7 +70,7 @@ void irda_qos_compute_intersection( struct qos_info *qos, struct qos_info *new)
qos->compression.bits &= new->compression.bits;
#endif
- irda_qos_bits_to_value( qos);
+ irda_qos_bits_to_value(qos);
}
/*
@@ -109,8 +109,8 @@ void irda_init_max_qos_capabilies( struct qos_info *qos)
* We just set the QoS capabilities for the peer station
*
*/
-void irda_qos_negotiate( struct qos_info *qos_rx, struct qos_info *qos_tx,
- struct sk_buff *skb)
+void irda_qos_negotiate(struct qos_info *qos_rx, struct qos_info *qos_tx,
+ struct sk_buff *skb)
{
int n=0;
#ifdef CONFIG_IRDA_COMPRESSION
@@ -162,7 +162,7 @@ void irda_qos_negotiate( struct qos_info *qos_rx, struct qos_info *qos_tx,
break;
}
- switch( code) {
+ switch(code) {
case PI_BAUD_RATE:
/*
* Stations must agree on baud rate, so calculate
@@ -178,7 +178,8 @@ void irda_qos_negotiate( struct qos_info *qos_rx, struct qos_info *qos_tx,
/*
* Negotiated independently for each station
*/
- DEBUG( 4, "MAX_TURN_TIME: %02x\n", byte);
+ DEBUG(4, __FUNCTION__ "(), MAX_TURN_TIME: %02x\n",
+ byte);
qos_tx->max_turn_time.bits = byte;
break;
case PI_DATA_SIZE:
@@ -268,7 +269,7 @@ void irda_qos_negotiate( struct qos_info *qos_rx, struct qos_info *qos_tx,
* Insert QoS negotiation parameters into frame
*
*/
-int irda_insert_qos_negotiation_params( struct qos_info *qos, __u8 *frame)
+int irda_insert_qos_negotiation_params(struct qos_info *qos, __u8 *frame)
{
int n;
__u16_host_order word;
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 95c18f13f..ed3929a72 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -6,7 +6,7 @@
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sat Aug 16 00:59:29 1997
- * Modified at: Wed Dec 9 01:34:59 1998
+ * Modified at: Thu Feb 4 10:49:38 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1997 Dag Brattli <dagb@cs.uit.no>, All Rights Reserved.
@@ -100,11 +100,16 @@ inline void irlmp_start_watchdog_timer( struct lsap_cb *self, int timeout)
irlmp_watchdog_timer_expired);
}
-inline void irlmp_start_discovery_timer( struct irlmp_cb *self, int timeout)
+inline void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout)
{
- irda_start_timer( &self->discovery_timer, timeout,
- (unsigned long) self,
- irlmp_discovery_timer_expired);
+ irda_start_timer(&self->discovery_timer, timeout, (unsigned long) self,
+ irlmp_discovery_timer_expired);
+}
+
+inline void irlmp_start_idle_timer(struct lap_cb *self, int timeout)
+{
+ irda_start_timer(&self->idle_timer, timeout, (unsigned long) self,
+ irlmp_idle_timer_expired);
}
/*
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c
index fdebb5788..c4822e2c6 100644
--- a/net/irda/wrapper.c
+++ b/net/irda/wrapper.c
@@ -1,12 +1,12 @@
/*********************************************************************
*
* Filename: wrapper.c
- * Version:
- * Description: IrDA Wrapper layer
+ * Version: 1.1
+ * Description: SIR wrapper layer
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Mon Aug 4 20:40:53 1997
- * Modified at: Sat Jan 16 22:05:45 1999
+ * Modified at: Wed Apr 21 12:45:55 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998 Dag Brattli <dagb@cs.uit.no>,
@@ -34,9 +34,7 @@
#include <net/irda/irlap_frame.h>
#include <net/irda/irda_device.h>
-#define MIN_LENGTH 14
-
-__inline__ static int stuff_byte( __u8 byte, __u8 *buf);
+inline static int stuff_byte(__u8 byte, __u8 *buf);
/*
* Function async_wrap (skb, *tx_buff)
@@ -44,28 +42,25 @@ __inline__ static int stuff_byte( __u8 byte, __u8 *buf);
* Makes a new buffer with wrapping and stuffing, should check that
* we don't get tx buffer overflow.
*/
-int async_wrap_skb( struct sk_buff *skb, __u8 *tx_buff, int buffsize)
+int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize)
{
- __u8 byte;
- int i, n;
+ int i;
+ int n;
int xbofs;
union {
__u16 value;
__u8 bytes[2];
} fcs;
-
- DEBUG( 6, __FUNCTION__ "()\n");
- ASSERT( skb != NULL, return 0;);
+ ASSERT(skb != NULL, return 0;);
/* Initialize variables */
fcs.value = INIT_FCS;
n = 0;
- if ( skb->len > 2048) {
- DEBUG( 0,"async_xmit: Warning size=%d of sk_buff to big!\n",
- (int) skb->len);
-
+ if (skb->len > 2048) {
+ DEBUG(0, __FUNCTION__ "Warning size=%d of sk_buff to big!\n",
+ (int) skb->len);
return 0;
}
@@ -73,43 +68,55 @@ int async_wrap_skb( struct sk_buff *skb, __u8 *tx_buff, int buffsize)
* Send XBOF's for required min. turn time and for the negotiated
* additional XBOFS
*/
- xbofs = ((struct irlap_skb_cb *)(skb->cb))->xbofs;
- for ( i=0; i<xbofs; i++) {
- tx_buff[n++] = XBOF;
- }
+ if (((struct irlap_skb_cb *)(skb->cb))->magic != LAP_MAGIC) {
+ DEBUG(1, __FUNCTION__ "(), wrong magic in skb!\n");
+ xbofs = 10;
+ } else
+ xbofs = ((struct irlap_skb_cb *)(skb->cb))->xbofs;
+#if 0
+ for (i=0; i<xbofs; i++)
+ tx_buff[n++] = XBOF;
+#else
+ memset(tx_buff+n, XBOF, xbofs);
+ n += xbofs;
+#endif
/* Start of packet character BOF */
tx_buff[n++] = BOF;
/* Insert frame and calc CRC */
- for( i=0; i < skb->len; i++) {
- byte = skb->data[i];
-
+ for (i=0; i < skb->len; i++) {
/*
* Check for the possibility of tx buffer overflow. We use
* bufsize-5 since the maximum number of bytes that can be
* transmitted after this point is 5.
*/
- if ( n > buffsize-5) {
- printk( KERN_WARNING
- "IrDA Wrapper: TX-buffer overflow!\n");
- return n;
- }
- n+=stuff_byte( byte, tx_buff+n);
- fcs.value = IR_FCS( fcs.value, byte);
+ ASSERT(n < (buffsize-5), return n;);
+
+ n += stuff_byte(skb->data[i], tx_buff+n);
+ fcs.value = IR_FCS(fcs.value, skb->data[i]);
}
/* Insert CRC in little endian format (LSB first) */
fcs.value = ~fcs.value;
#ifdef __LITTLE_ENDIAN
- n += stuff_byte( fcs.bytes[0], tx_buff+n);
- n += stuff_byte( fcs.bytes[1], tx_buff+n);
+ n += stuff_byte(fcs.bytes[0], tx_buff+n);
+ n += stuff_byte(fcs.bytes[1], tx_buff+n);
#else ifdef __BIG_ENDIAN
- n += stuff_byte( fcs.bytes[1], tx_buff+n);
- n += stuff_byte( fcs.bytes[0], tx_buff+n);
+ n += stuff_byte(fcs.bytes[1], tx_buff+n);
+ n += stuff_byte(fcs.bytes[0], tx_buff+n);
#endif
tx_buff[n++] = EOF;
-
+
+#if 0
+ {
+ int i;
+
+ for (i=0;i<n;i++)
+ printk("%02x", tx_buff[i]);
+ printk("\n");
+ }
+#endif
return n;
}
@@ -119,62 +126,62 @@ int async_wrap_skb( struct sk_buff *skb, __u8 *tx_buff, int buffsize)
* Got a frame, make a copy of it, and pass it up the stack!
*
*/
-static __inline__ void async_bump( struct irda_device *idev, __u8 *buf,
- int len)
+static inline void async_bump(struct irda_device *idev, __u8 *buf, int len)
{
struct sk_buff *skb;
-
- skb = dev_alloc_skb( len+1);
- if (skb == NULL) {
- printk( KERN_INFO __FUNCTION__ "() memory squeeze, "
- "dropping frame.\n");
+
+ skb = dev_alloc_skb(len+1);
+ if (!skb) {
idev->stats.rx_dropped++;
return;
}
- /* Align to 20 bytes */
- skb_reserve( skb, 1);
+ /* Align IP header to 20 bytes */
+ skb_reserve(skb, 1);
- ASSERT( len-2 > 0, return;);
-
/* Copy data without CRC */
- skb_put( skb, len-2);
- memcpy( skb->data, buf, len-2);
+ memcpy(skb_put(skb, len-2), buf, len-2);
- idev->rx_buff.len = 0;
/*
* Feed it to IrLAP layer
*/
- /* memcpy(skb_put(skb,count), ax->rbuff, count); */
skb->dev = &idev->netdev;
skb->mac.raw = skb->data;
skb->protocol = htons(ETH_P_IRDA);
- netif_rx( skb);
+ netif_rx(skb);
idev->stats.rx_packets++;
idev->stats.rx_bytes += skb->len;
}
-
+
/*
* Function async_unwrap_char (idev, byte)
*
* Parse and de-stuff frame received from the IR-port
*
*/
-void async_unwrap_char( struct irda_device *idev, __u8 byte)
+void async_unwrap_char(struct irda_device *idev, __u8 byte)
{
/* State machine for receiving frames */
- switch( idev->rx_buff.state) {
+ switch (idev->rx_buff.state) {
case OUTSIDE_FRAME:
- if ( byte == BOF) {
+ switch(byte) {
+ case BOF:
idev->rx_buff.state = BEGIN_FRAME;
idev->rx_buff.in_frame = TRUE;
- } else if ( byte == EOF) {
+ break;
+ case XBOF:
+ /* idev->xbofs++; */
+ break;
+ case EOF:
irda_device_set_media_busy( idev, TRUE);
+ break;
+ default:
+ break;
}
break;
case BEGIN_FRAME:
- switch ( byte) {
+ switch (byte) {
case BOF:
/* Continue */
break;
@@ -191,33 +198,29 @@ void async_unwrap_char( struct irda_device *idev, __u8 byte)
break;
default:
/* Got first byte of frame */
- if ( idev->rx_buff.len < idev->rx_buff.truesize) {
- idev->rx_buff.data[ idev->rx_buff.len++] = byte;
+ idev->rx_buff.data = idev->rx_buff.head;
+ idev->rx_buff.len = 0;
+
+ idev->rx_buff.data[idev->rx_buff.len++] = byte;
- idev->rx_buff.fcs = IR_FCS( INIT_FCS, byte);
- idev->rx_buff.state = INSIDE_FRAME;
- } else
- printk( "Rx buffer overflow\n");
+ idev->rx_buff.fcs = IR_FCS(INIT_FCS, byte);
+ idev->rx_buff.state = INSIDE_FRAME;
break;
}
break;
case LINK_ESCAPE:
- switch ( byte) {
+ switch (byte) {
case BOF:
/* New frame? */
- DEBUG( 4, "New frame?\n");
idev->rx_buff.state = BEGIN_FRAME;
- idev->rx_buff.len = 0;
- irda_device_set_media_busy( idev, TRUE);
+ irda_device_set_media_busy(idev, TRUE);
break;
case CE:
- DEBUG( 4, "WARNING: State not defined\n");
+ DEBUG(4, "WARNING: State not defined\n");
break;
case EOF:
/* Abort frame */
- DEBUG( 0, "Abort frame (2)\n");
idev->rx_buff.state = OUTSIDE_FRAME;
- idev->rx_buff.len = 0;
break;
default:
/*
@@ -225,23 +228,25 @@ void async_unwrap_char( struct irda_device *idev, __u8 byte)
* following CE, IrLAP p.114
*/
byte ^= IR_TRANS;
- if ( idev->rx_buff.len < idev->rx_buff.truesize) {
- idev->rx_buff.data[ idev->rx_buff.len++] = byte;
-
- idev->rx_buff.fcs = IR_FCS( idev->rx_buff.fcs, byte);
+ if (idev->rx_buff.len < idev->rx_buff.truesize) {
+ idev->rx_buff.data[idev->rx_buff.len++] = byte;
+ idev->rx_buff.fcs = IR_FCS(idev->rx_buff.fcs,
+ byte);
idev->rx_buff.state = INSIDE_FRAME;
- } else
- printk( "Rx buffer overflow\n");
+ } else {
+ DEBUG(1, __FUNCTION__
+ "(), Rx buffer overflow, aborting\n");
+ idev->rx_buff.state = OUTSIDE_FRAME;
+ }
break;
}
break;
case INSIDE_FRAME:
- switch ( byte) {
+ switch (byte) {
case BOF:
/* New frame? */
idev->rx_buff.state = BEGIN_FRAME;
- idev->rx_buff.len = 0;
- irda_device_set_media_busy( idev, TRUE);
+ irda_device_set_media_busy(idev, TRUE);
break;
case CE:
/* Stuffed char */
@@ -255,13 +260,12 @@ void async_unwrap_char( struct irda_device *idev, __u8 byte)
/*
* Test FCS and deliver frame if it's good
*/
- if ( idev->rx_buff.fcs == GOOD_FCS) {
- async_bump( idev, idev->rx_buff.data,
- idev->rx_buff.len);
+ if (idev->rx_buff.fcs == GOOD_FCS) {
+ async_bump(idev, idev->rx_buff.data,
+ idev->rx_buff.len);
} else {
/* Wrong CRC, discard frame! */
- irda_device_set_media_busy( idev, TRUE);
- idev->rx_buff.len = 0;
+ irda_device_set_media_busy(idev, TRUE);
idev->stats.rx_errors++;
idev->stats.rx_crc_errors++;
@@ -269,12 +273,15 @@ void async_unwrap_char( struct irda_device *idev, __u8 byte)
break;
default:
/* Next byte of frame */
- if ( idev->rx_buff.len < idev->rx_buff.truesize) {
- idev->rx_buff.data[ idev->rx_buff.len++] = byte;
-
- idev->rx_buff.fcs = IR_FCS( idev->rx_buff.fcs, byte);
- } else
- printk( "Rx buffer overflow\n");
+ if (idev->rx_buff.len < idev->rx_buff.truesize) {
+ idev->rx_buff.data[idev->rx_buff.len++] = byte;
+ idev->rx_buff.fcs = IR_FCS(idev->rx_buff.fcs,
+ byte);
+ } else {
+ DEBUG(1, __FUNCTION__
+ "(), Rx buffer overflow, aborting\n");
+ idev->rx_buff.state = OUTSIDE_FRAME;
+ }
break;
}
break;
@@ -288,11 +295,11 @@ void async_unwrap_char( struct irda_device *idev, __u8 byte)
* buf. The buffer must at all times be able to have two bytes inserted.
*
*/
-__inline__ static int stuff_byte( __u8 byte, __u8 *buf)
+inline static int stuff_byte(__u8 byte, __u8 *buf)
{
- switch ( byte) {
- case BOF:
- case EOF:
+ switch (byte) {
+ case BOF: /* FALLTHROUGH */
+ case EOF: /* FALLTHROUGH */
case CE:
/* Insert transparently coded */
buf[0] = CE; /* Send link escape */
@@ -308,3 +315,5 @@ __inline__ static int stuff_byte( __u8 byte, __u8 *buf)
}
+
+
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a281c966b..9247bf99c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -25,7 +25,6 @@
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
-#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
@@ -55,6 +54,13 @@ static struct socket *netlink_kernel[MAX_LINKS];
static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);
+/* Netlink table lock. It protects against sk list changes
+ during uninterruptible sleeps in netlink_broadcast.
+
+ This lock MUST NOT be used from bh/irq on SMP kernels, because
+ it would result in a race in netlink_wait_on_table.
+ */
+
extern __inline__ void
netlink_wait_on_table(int protocol)
{
@@ -69,16 +75,16 @@ netlink_lock_table(int protocol)
}
extern __inline__ void
-netlink_unlock_table(int protocol, int wakeup)
+netlink_unlock_table(int protocol)
{
#if 0
/* F...g gcc does not eat it! */
- if (atomic_dec_and_test(&nl_table_lock[protocol]) && wakeup)
+ if (atomic_dec_and_test(&nl_table_lock[protocol]))
wake_up(&nl_table_wait);
#else
atomic_dec(&nl_table_lock[protocol]);
- if (atomic_read(&nl_table_lock[protocol]) && wakeup)
+ if (!atomic_read(&nl_table_lock[protocol]))
wake_up(&nl_table_wait);
#endif
}
@@ -125,7 +131,9 @@ static void netlink_remove(struct sock *sk)
struct sock **skp;
for (skp = &nl_table[sk->protocol]; *skp; skp = &((*skp)->next)) {
if (*skp == sk) {
+ start_bh_atomic();
*skp = sk->next;
+ end_bh_atomic();
return;
}
}
@@ -186,7 +194,7 @@ static int netlink_release(struct socket *sock, struct socket *peer)
transport (and AF_UNIX datagram, when it will be repaired).
Someone could wait on our sock->wait now.
- We cannot release socket until waiter will remove yourself
+ We cannot release the socket until the waiter removes itself
from wait queue. I choose the most conservative way of solving
the problem.
@@ -218,8 +226,6 @@ static int netlink_autobind(struct socket *sock)
struct sock *sk = sock->sk;
struct sock *osk;
- netlink_wait_on_table(sk->protocol);
-
sk->protinfo.af_netlink.groups = 0;
sk->protinfo.af_netlink.pid = current->pid;
@@ -264,8 +270,6 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
return 0;
}
- netlink_wait_on_table(sk->protocol);
-
for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
if (osk->protinfo.af_netlink.pid == nladdr->nl_pid)
return -EADDRINUSE;
@@ -332,7 +336,7 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock
retry:
for (sk = nl_table[protocol]; sk; sk = sk->next) {
if (sk->protinfo.af_netlink.pid != pid)
- continue;
+ continue;
netlink_lock(sk);
@@ -416,7 +420,8 @@ void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
/* While we sleep in clone, do not allow the socket list to change */
- netlink_lock_table(protocol);
+ if (allocation == GFP_KERNEL)
+ netlink_lock_table(protocol);
for (sk = nl_table[protocol]; sk; sk = sk->next) {
if (ssk == sk)
@@ -454,7 +459,8 @@ void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
netlink_unlock(sk);
}
- netlink_unlock_table(protocol, allocation == GFP_KERNEL);
+ if (allocation == GFP_KERNEL)
+ netlink_unlock_table(protocol);
if (skb2)
kfree_skb(skb2);
@@ -475,7 +481,7 @@ Nprintk("seterr");
!(sk->protinfo.af_netlink.groups&group))
continue;
- sk->err = -code;
+ sk->err = code;
sk->state_change(sk);
}
}
@@ -739,15 +745,20 @@ int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
void netlink_detach(int unit)
{
struct socket *sock = netlink_kernel[unit];
+
netlink_kernel[unit] = NULL;
+ synchronize_bh();
+
sock_release(sock);
}
int netlink_post(int unit, struct sk_buff *skb)
{
- if (netlink_kernel[unit]) {
+ struct socket *sock = netlink_kernel[unit];
+ barrier();
+ if (sock) {
memset(skb->cb, 0, sizeof(skb->cb));
- netlink_broadcast(netlink_kernel[unit]->sk, skb, 0, ~0, GFP_ATOMIC);
+ netlink_broadcast(sock->sk, skb, 0, ~0, GFP_ATOMIC);
return 0;
}
return -EUNATCH;
@@ -800,6 +811,8 @@ done:
len-=(offset-begin);
if(len>length)
len=length;
+ if(len<0)
+ len=0;
return len;
}
#endif
diff --git a/net/netsyms.c b/net/netsyms.c
index 4c6de6fee..764900d50 100644
--- a/net/netsyms.c
+++ b/net/netsyms.c
@@ -12,6 +12,7 @@
#include <linux/net.h>
#include <linux/in.h>
#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
#include <linux/fddidevice.h>
#include <linux/trdevice.h>
#include <linux/ioport.h>
@@ -20,6 +21,9 @@
#include <net/dst.h>
#include <net/checksum.h>
#include <linux/etherdevice.h>
+#ifdef CONFIG_HIPPI
+#include <linux/hippidevice.h>
+#endif
#include <net/pkt_sched.h>
#ifdef CONFIG_BRIDGE
@@ -44,6 +48,8 @@
#include <linux/igmp.h>
extern struct net_proto_family inet_family_ops;
+extern __u32 sysctl_wmem_max;
+extern __u32 sysctl_rmem_max;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/in6.h>
@@ -239,6 +245,10 @@ EXPORT_SYMBOL(inet_dgram_ops);
EXPORT_SYMBOL(ip_cmsg_recv);
EXPORT_SYMBOL(__release_sock);
+/* Route manipulation */
+EXPORT_SYMBOL(ip_rt_ioctl);
+EXPORT_SYMBOL(devinet_ioctl);
+
/* needed for ip_gre -cw */
EXPORT_SYMBOL(ip_statistics);
@@ -382,6 +392,9 @@ EXPORT_SYMBOL(rtnl_unlock);
EXPORT_SYMBOL(ipv4_config);
EXPORT_SYMBOL(dev_open);
+/* Used by other modules */
+EXPORT_SYMBOL(in_ntoa);
+
EXPORT_SYMBOL(ip_rcv);
EXPORT_SYMBOL(arp_rcv);
EXPORT_SYMBOL(arp_tbl);
@@ -425,12 +438,14 @@ EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(register_netdev);
EXPORT_SYMBOL(unregister_netdev);
+EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(ether_setup);
EXPORT_SYMBOL(dev_new_index);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(eth_type_trans);
#ifdef CONFIG_FDDI
EXPORT_SYMBOL(fddi_type_trans);
+EXPORT_SYMBOL(fddi_setup);
#endif /* CONFIG_FDDI */
EXPORT_SYMBOL(eth_copy_and_sum);
EXPORT_SYMBOL(alloc_skb);
@@ -465,6 +480,15 @@ EXPORT_SYMBOL(kill_fasync);
EXPORT_SYMBOL(if_port_text);
+#ifdef CONFIG_HIPPI
+EXPORT_SYMBOL(hippi_type_trans);
+EXPORT_SYMBOL(init_hippi_dev);
+EXPORT_SYMBOL(unregister_hipdev);
+#endif
+
+EXPORT_SYMBOL(sysctl_wmem_max);
+EXPORT_SYMBOL(sysctl_rmem_max);
+
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
#include<linux/if_ltalk.h>
EXPORT_SYMBOL(ltalk_setup);
@@ -479,6 +503,7 @@ EXPORT_SYMBOL(qdisc_head);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(noop_qdisc);
#ifdef CONFIG_NET_SCHED
+PSCHED_EXPORTLIST;
EXPORT_SYMBOL(pfifo_qdisc_ops);
EXPORT_SYMBOL(register_qdisc);
EXPORT_SYMBOL(unregister_qdisc);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c7e7a6733..e78e41352 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -5,7 +5,7 @@
*
* PACKET - implements raw packet sockets.
*
- * Version: $Id: af_packet.c,v 1.18 1998/10/03 15:55:24 freitag Exp $
+ * Version: $Id: af_packet.c,v 1.19 1999/03/21 05:23:03 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -831,7 +831,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, int len,
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.
*/
- err = copied;
+ err = (flags&MSG_TRUNC) ? skb->len : copied;
out_free:
skb_free_datagram(sk, skb);
diff --git a/net/sched/Config.in b/net/sched/Config.in
index 5d497a050..ffb7a4810 100644
--- a/net/sched/Config.in
+++ b/net/sched/Config.in
@@ -18,10 +18,11 @@ if [ "$CONFIG_NET_QOS" = "y" ]; then
fi
bool 'Packet classifier API' CONFIG_NET_CLS
if [ "$CONFIG_NET_CLS" = "y" ]; then
- bool 'Routing tables based classifier' CONFIG_NET_CLS_ROUTE
- if [ "$CONFIG_IP_FIREWALL" = "y" ]; then
- bool 'Firewall based classifier' CONFIG_NET_CLS_FW
+ tristate 'Routing table based classifier' CONFIG_NET_CLS_ROUTE4
+ if [ "$CONFIG_NET_CLS_ROUTE4" != "n" ]; then
+ define_bool CONFIG_NET_CLS_ROUTE y
fi
+ tristate 'Firewall based classifier' CONFIG_NET_CLS_FW
tristate 'U32 classifier' CONFIG_NET_CLS_U32
if [ "$CONFIG_NET_QOS" = "y" ]; then
tristate 'Special RSVP classifier' CONFIG_NET_CLS_RSVP
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 21a1cf07a..6e1169fab 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -125,12 +125,20 @@ else
endif
endif
-ifeq ($(CONFIG_NET_CLS_ROUTE), y)
+ifeq ($(CONFIG_NET_CLS_ROUTE4), y)
O_OBJS += cls_route.o
+else
+ ifeq ($(CONFIG_NET_CLS_ROUTE4), m)
+ M_OBJS += cls_route.o
+ endif
endif
ifeq ($(CONFIG_NET_CLS_FW), y)
O_OBJS += cls_fw.o
+else
+ ifeq ($(CONFIG_NET_CLS_FW), m)
+ M_OBJS += cls_fw.o
+ endif
endif
endif
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 081896dc5..683063137 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -7,6 +7,10 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes:
+ *
+ * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
*/
#include <asm/uaccess.h>
@@ -27,6 +31,7 @@
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
+#include <linux/kmod.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
@@ -87,21 +92,13 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
/* Select new prio value from the range, managed by kernel. */
-static __inline__ u32 tcf_auto_prio(struct tcf_proto *tp, u32 prio)
+static __inline__ u32 tcf_auto_prio(struct tcf_proto *tp)
{
u32 first = TC_H_MAKE(0xC0000000U,0U);
- if (!tp || tp->next == NULL)
- return first;
-
- if (prio == TC_H_MAKE(0xFFFF0000U,0U))
- first = tp->prio+1;
- else
+ if (tp)
first = tp->prio-1;
- if (first == prio)
- first = tp->prio;
-
return first;
}
@@ -129,10 +126,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
/* If no priority is given, the user wants us to allocate it. */
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
return -ENOENT;
- if (n->nlmsg_flags&NLM_F_APPEND)
- prio = TC_H_MAKE(0xFFFF0000U,0U);
- else
- prio = TC_H_MAKE(0x80000000U,0U);
+ prio = TC_H_MAKE(0x80000000U,0U);
}
/* Find head of filter chain. */
@@ -194,6 +188,18 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if ((tp = kmalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
goto errout;
tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
+#ifdef CONFIG_KMOD
+ if (tp_ops==NULL && tca[TCA_KIND-1] != NULL) {
+ struct rtattr *kind = tca[TCA_KIND-1];
+ char module_name[4 + IFNAMSIZ + 1];
+
+ if (RTA_PAYLOAD(kind) <= IFNAMSIZ) {
+ sprintf(module_name, "cls_%s", (char*)RTA_DATA(kind));
+ request_module (module_name);
+ tp_ops = tcf_proto_lookup_ops(kind);
+ }
+ }
+#endif
if (tp_ops == NULL) {
err = -EINVAL;
kfree(tp);
@@ -202,7 +208,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
memset(tp, 0, sizeof(*tp));
tp->ops = tp_ops;
tp->protocol = protocol;
- tp->prio = nprio ? : tcf_auto_prio(*back, prio);
+ tp->prio = nprio ? : tcf_auto_prio(*back);
tp->q = q;
tp->classify = tp_ops->classify;
tp->classid = parent;
@@ -221,6 +227,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
*back = tp->next;
+ synchronize_bh();
+
tp->ops->destroy(tp);
kfree(tp);
err = 0;
@@ -249,7 +257,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
}
- err = tp->ops->change(tp, t->tcm_handle, tca, &fh);
+ err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
if (err == 0)
tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
@@ -336,12 +344,16 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return skb->len;
- if ((q = qdisc_lookup(dev, tcm->tcm_parent)) == NULL)
+ if (!tcm->tcm_parent)
+ q = dev->qdisc_sleeping;
+ else
+ q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
+ if (q == NULL)
return skb->len;
- cops = q->ops->cl_ops;
+ if ((cops = q->ops->cl_ops) == NULL)
+ goto errout;
if (TC_H_MIN(tcm->tcm_parent)) {
- if (cops)
- cl = cops->get(q, tcm->tcm_parent);
+ cl = cops->get(q, tcm->tcm_parent);
if (cl == 0)
goto errout;
}
@@ -360,7 +372,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
TC_H_MIN(tcm->tcm_info) != tp->protocol)
continue;
if (t > s_t)
- memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(int));
+ memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
if (cb->args[1] == 0) {
if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER) <= 0) {
@@ -418,8 +430,8 @@ __initfunc(int tc_filter_init(void))
#ifdef CONFIG_NET_CLS_U32
INIT_TC_FILTER(u32);
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
- INIT_TC_FILTER(route);
+#ifdef CONFIG_NET_CLS_ROUTE4
+ INIT_TC_FILTER(route4);
#endif
#ifdef CONFIG_NET_CLS_FW
INIT_TC_FILTER(fw);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 0fab64dda..e92b846ee 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -1,5 +1,5 @@
/*
- * net/sched/cls_fw.c Routing table based packet classifier.
+ * net/sched/cls_fw.c Classifier mapping ipchains' fwmark to traffic class.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -7,8 +7,13 @@
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes:
+ * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
+ * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -34,15 +39,56 @@
#include <net/sock.h>
#include <net/pkt_sched.h>
+struct fw_head
+{
+ struct fw_filter *ht[256];
+};
+
+struct fw_filter
+{
+ struct fw_filter *next;
+ u32 id;
+ struct tcf_result res;
+#ifdef CONFIG_NET_CLS_POLICE
+ struct tcf_police *police;
+#endif
+};
+
+static __inline__ int fw_hash(u32 handle)
+{
+ return handle&0xFF;
+}
static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- u32 clid = skb->fwmark;
+ struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_filter *f;
+#ifdef CONFIG_IP_FIREWALL
+ u32 id = skb->fwmark;
+#else
+ u32 id = 0;
+#endif
- if (clid && (TC_H_MAJ(clid) == 0 ||
- !(TC_H_MAJ(clid^tp->q->handle)))) {
- res->classid = clid;
+ if (head == NULL)
+ goto old_method;
+
+ for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+ if (f->id == id) {
+ *res = f->res;
+#ifdef CONFIG_NET_CLS_POLICE
+ if (f->police)
+ return tcf_police(skb, f->police);
+#endif
+ return 0;
+ }
+ }
+ return -1;
+
+old_method:
+ if (id && (TC_H_MAJ(id) == 0 ||
+ !(TC_H_MAJ(id^tp->q->handle)))) {
+ res->classid = id;
res->class = 0;
return 0;
}
@@ -51,6 +97,16 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
+ struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_filter *f;
+
+ if (head == NULL)
+ return 0;
+
+ for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+ if (f->id == handle)
+ return (unsigned long)f;
+ }
return 0;
}
@@ -60,24 +116,236 @@ static void fw_put(struct tcf_proto *tp, unsigned long f)
static int fw_init(struct tcf_proto *tp)
{
+ MOD_INC_USE_COUNT;
return 0;
}
static void fw_destroy(struct tcf_proto *tp)
{
+ struct fw_head *head = (struct fw_head*)xchg(&tp->root, NULL);
+ struct fw_filter *f;
+ int h;
+
+ if (head == NULL) {
+ MOD_DEC_USE_COUNT;
+ return;
+ }
+
+ for (h=0; h<256; h++) {
+ while ((f=head->ht[h]) != NULL) {
+ unsigned long cl;
+ head->ht[h] = f->next;
+
+ if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+#ifdef CONFIG_NET_CLS_POLICE
+ tcf_police_release(f->police);
+#endif
+ kfree(f);
+ }
+ }
+ kfree(head);
+ MOD_DEC_USE_COUNT;
}
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
+ struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_filter *f = (struct fw_filter*)arg;
+ struct fw_filter **fp;
+
+ if (head == NULL || f == NULL)
+ return -EINVAL;
+
+ for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+ if (*fp == f) {
+ unsigned long cl;
+
+ *fp = f->next;
+ synchronize_bh();
+
+ if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+#ifdef CONFIG_NET_CLS_POLICE
+ tcf_police_release(f->police);
+#endif
+ kfree(f);
+ return 0;
+ }
+ }
return -EINVAL;
}
-static int fw_change(struct tcf_proto *tp, u32 handle,
- struct rtattr **tca,
- unsigned long *arg)
+static int fw_change(struct tcf_proto *tp, unsigned long base,
+ u32 handle,
+ struct rtattr **tca,
+ unsigned long *arg)
{
- return handle ? -EINVAL : 0;
+ struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_filter *f;
+ struct rtattr *opt = tca[TCA_OPTIONS-1];
+ struct rtattr *tb[TCA_FW_MAX];
+ int err;
+
+ if (!opt)
+ return handle ? -EINVAL : 0;
+
+ if (rtattr_parse(tb, TCA_FW_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
+ return -EINVAL;
+
+ if ((f = (struct fw_filter*)*arg) != NULL) {
+ /* Node exists: adjust only classid */
+
+ if (f->id != handle && handle)
+ return -EINVAL;
+ if (tb[TCA_FW_CLASSID-1]) {
+ unsigned long cl;
+
+ f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
+ cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid);
+ cl = cls_set_class(&f->res.class, cl);
+ if (cl)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+ }
+#ifdef CONFIG_NET_CLS_POLICE
+ if (tb[TCA_FW_POLICE-1]) {
+ struct tcf_police *police = tcf_police_locate(tb[TCA_FW_POLICE-1], tca[TCA_RATE-1]);
+
+ police = xchg(&f->police, police);
+ synchronize_bh();
+
+ tcf_police_release(police);
+ }
+#endif
+ return 0;
+ }
+
+ if (!handle)
+ return -EINVAL;
+
+ if (head == NULL) {
+ head = kmalloc(sizeof(struct fw_head), GFP_KERNEL);
+ if (head == NULL)
+ return -ENOBUFS;
+ memset(head, 0, sizeof(*head));
+
+ tp->root = head;
+ synchronize_bh();
+ }
+
+ f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL);
+ if (f == NULL)
+ return -ENOBUFS;
+ memset(f, 0, sizeof(*f));
+
+ f->id = handle;
+
+ if (tb[TCA_FW_CLASSID-1]) {
+ err = -EINVAL;
+ if (RTA_PAYLOAD(tb[TCA_FW_CLASSID-1]) != 4)
+ goto errout;
+ f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
+ cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ }
+
+#ifdef CONFIG_NET_CLS_POLICE
+ if (tb[TCA_FW_POLICE-1])
+ f->police = tcf_police_locate(tb[TCA_FW_POLICE-1], tca[TCA_RATE-1]);
+#endif
+
+ f->next = head->ht[fw_hash(handle)];
+ wmb();
+ head->ht[fw_hash(handle)] = f;
+
+ *arg = (unsigned long)f;
+ return 0;
+
+errout:
+ if (f)
+ kfree(f);
+ return err;
+}
+
+static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+{
+ struct fw_head *head = (struct fw_head*)tp->root;
+ int h;
+
+ if (head == NULL)
+ arg->stop = 1;
+
+ if (arg->stop)
+ return;
+
+ for (h = 0; h < 256; h++) {
+ struct fw_filter *f;
+
+ for (f = head->ht[h]; f; f = f->next) {
+ if (arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(tp, (unsigned long)f, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+ }
+}
+
+#ifdef CONFIG_RTNETLINK
+static int fw_dump(struct tcf_proto *tp, unsigned long fh,
+ struct sk_buff *skb, struct tcmsg *t)
+{
+ struct fw_filter *f = (struct fw_filter*)fh;
+ unsigned char *b = skb->tail;
+ struct rtattr *rta;
+
+ if (f == NULL)
+ return skb->len;
+
+ t->tcm_handle = f->id;
+
+ if (!f->res.classid
+#ifdef CONFIG_NET_CLS_POLICE
+ && !f->police
+#endif
+ )
+ return skb->len;
+
+ rta = (struct rtattr*)b;
+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+
+ if (f->res.classid)
+ RTA_PUT(skb, TCA_FW_CLASSID, 4, &f->res.classid);
+#ifdef CONFIG_NET_CLS_POLICE
+ if (f->police) {
+ struct rtattr * p_rta = (struct rtattr*)skb->tail;
+
+ RTA_PUT(skb, TCA_FW_POLICE, 0, NULL);
+
+ if (tcf_police_dump(skb, f->police) < 0)
+ goto rtattr_failure;
+
+ p_rta->rta_len = skb->tail - (u8*)p_rta;
+ }
+#endif
+
+ rta->rta_len = skb->tail - b;
+#ifdef CONFIG_NET_CLS_POLICE
+ if (f->police) {
+ RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ }
+#endif
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
}
+#endif
+
struct tcf_proto_ops cls_fw_ops = {
NULL,
@@ -90,5 +358,22 @@ struct tcf_proto_ops cls_fw_ops = {
fw_put,
fw_change,
fw_delete,
- NULL,
+ fw_walk,
+#ifdef CONFIG_RTNETLINK
+ fw_dump
+#else
+ NULL
+#endif
};
+
+#ifdef MODULE
+int init_module(void)
+{
+ return register_tcf_proto_ops(&cls_fw_ops);
+}
+
+void cleanup_module(void)
+{
+ unregister_tcf_proto_ops(&cls_fw_ops);
+}
+#endif
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index a78f2090e..f83e79134 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -1,5 +1,5 @@
/*
- * net/sched/cls_route.c Routing table based packet classifier.
+ * net/sched/cls_route.c ROUTE4 classifier.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
@@ -34,65 +35,598 @@
#include <net/sock.h>
#include <net/pkt_sched.h>
+/*
+ 1. For now we assume that route tags < 256.
+ This allows direct table lookups instead of hash tables.
+ 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ are mutually exclusive.
+ 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ */
+
+struct route4_fastmap
+{
+ struct route4_filter *filter;
+ u32 id;
+ int iif;
+};
+
+struct route4_head
+{
+ struct route4_fastmap fastmap[16];
+ struct route4_bucket *table[256+1];
+};
+
+struct route4_bucket
+{
+ struct route4_filter *ht[16+16+1];
+};
+
+struct route4_filter
+{
+ struct route4_filter *next;
+ u32 id;
+ int iif;
+
+ struct tcf_result res;
+#ifdef CONFIG_NET_CLS_POLICE
+ struct tcf_police *police;
+#endif
+
+ u32 handle;
+ struct route4_bucket *bkt;
+};
+
+#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+
+static __inline__ int route4_fastmap_hash(u32 id, int iif)
+{
+ return id&0xF;
+}
+
+static void route4_reset_fastmap(struct route4_head *head, u32 id)
+{
+ start_bh_atomic();
+ memset(head->fastmap, 0, sizeof(head->fastmap));
+ end_bh_atomic();
+}
+
+static void __inline__
+route4_set_fastmap(struct route4_head *head, u32 id, int iif,
+ struct route4_filter *f)
+{
+ int h = route4_fastmap_hash(id, iif);
+ head->fastmap[h].id = id;
+ head->fastmap[h].iif = iif;
+ head->fastmap[h].filter = f;
+}
+
+static __inline__ int route4_hash_to(u32 id)
+{
+ return id&0xFF;
+}
+
+static __inline__ int route4_hash_from(u32 id)
+{
+ return (id>>16)&0xF;
+}
+
+static __inline__ int route4_hash_iif(int iif)
+{
+ return 16 + ((iif>>16)&0xF);
+}
+
+static __inline__ int route4_hash_wild(void)
+{
+ return 32;
+}
+
+#ifdef CONFIG_NET_CLS_POLICE
+#define IF_ROUTE_POLICE \
+if (f->police) { \
+ int pol_res = tcf_police(skb, f->police); \
+ if (pol_res >= 0) return pol_res; \
+ dont_cache = 1; \
+ continue; \
+} \
+if (!dont_cache)
+#else
+#define IF_ROUTE_POLICE
+#endif
+
-static int route_classify(struct sk_buff *skb, struct tcf_proto *tp,
- struct tcf_result *res)
+static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
+ struct tcf_result *res)
{
- struct dst_entry *dst = skb->dst;
+ struct route4_head *head = (struct route4_head*)tp->root;
+ struct dst_entry *dst;
+ struct route4_bucket *b;
+ struct route4_filter *f;
+#ifdef CONFIG_NET_CLS_POLICE
+ int dont_cache = 0;
+#endif
+ u32 id, h;
+ int iif;
- if (dst) {
- u32 clid = dst->tclassid;
+ if ((dst = skb->dst) == NULL)
+ goto failure;
- if (clid && (TC_H_MAJ(clid) == 0 ||
- !(TC_H_MAJ(clid^tp->q->handle)))) {
- res->classid = clid;
- res->class = 0;
+ id = dst->tclassid;
+ if (head == NULL)
+ goto old_method;
+
+ iif = ((struct rtable*)dst)->key.iif;
+
+ h = route4_fastmap_hash(id, iif);
+ if (id == head->fastmap[h].id &&
+ iif == head->fastmap[h].iif &&
+ (f = head->fastmap[h].filter) != NULL) {
+ if (f == ROUTE4_FAILURE)
+ goto failure;
+
+ *res = f->res;
+ return 0;
+ }
+
+ h = route4_hash_to(id);
+
+restart:
+ if ((b = head->table[h]) != NULL) {
+ f = b->ht[route4_hash_from(id)];
+
+ for ( ; f; f = f->next) {
+ if (f->id == id) {
+ *res = f->res;
+ IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
+ return 0;
+ }
+ }
+
+ for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next) {
+ if (f->iif == iif) {
+ *res = f->res;
+ IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
+ return 0;
+ }
+ }
+
+ for (f = b->ht[route4_hash_wild()]; f; f = f->next) {
+ *res = f->res;
+ IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
return 0;
}
+
+ }
+ if (h < 256) {
+ h = 256;
+ id &= ~0xFFFF;
+ goto restart;
+ }
+
+#ifdef CONFIG_NET_CLS_POLICE
+ if (!dont_cache)
+#endif
+ route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
+failure:
+ return -1;
+
+old_method:
+ if (id && (TC_H_MAJ(id) == 0 ||
+ !(TC_H_MAJ(id^tp->q->handle)))) {
+ res->classid = id;
+ res->class = 0;
+ return 0;
}
return -1;
}
-static unsigned long route_get(struct tcf_proto *tp, u32 handle)
+static u32 to_hash(u32 id)
+{
+ u32 h = id&0xFF;
+ if (id&0x8000)
+ h += 256;
+ return h;
+}
+
+static u32 from_hash(u32 id)
{
+ id &= 0xFFFF;
+ if (id == 0xFFFF)
+ return 32;
+ if (!(id & 0x8000)) {
+ if (id > 255)
+ return 256;
+ return id&0xF;
+ }
+ return 16 + (id&0xF);
+}
+
+static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
+{
+ struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_bucket *b;
+ struct route4_filter *f;
+ unsigned h1, h2;
+
+ if (!head)
+ return 0;
+
+ h1 = to_hash(handle);
+ if (h1 > 256)
+ return 0;
+
+ h2 = from_hash(handle>>16);
+ if (h2 > 32)
+ return 0;
+
+ if ((b = head->table[h1]) != NULL) {
+ for (f = b->ht[h2]; f; f = f->next)
+ if (f->handle == handle)
+ return (unsigned long)f;
+ }
return 0;
}
-static void route_put(struct tcf_proto *tp, unsigned long f)
+static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
-static int route_init(struct tcf_proto *tp)
+static int route4_init(struct tcf_proto *tp)
{
+ MOD_INC_USE_COUNT;
return 0;
}
-static void route_destroy(struct tcf_proto *tp)
+static void route4_destroy(struct tcf_proto *tp)
{
+ struct route4_head *head = xchg(&tp->root, NULL);
+ int h1, h2;
+
+ if (head == NULL) {
+ MOD_DEC_USE_COUNT;
+ return;
+ }
+
+ for (h1=0; h1<=256; h1++) {
+ struct route4_bucket *b;
+
+ if ((b = head->table[h1]) != NULL) {
+ for (h2=0; h2<=32; h2++) {
+ struct route4_filter *f;
+
+ while ((f = b->ht[h2]) != NULL) {
+ unsigned long cl;
+
+ b->ht[h2] = f->next;
+ if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+#ifdef CONFIG_NET_CLS_POLICE
+ tcf_police_release(f->police);
+#endif
+ kfree(f);
+ }
+ }
+ kfree(b);
+ }
+ }
+ kfree(head);
+ MOD_DEC_USE_COUNT;
}
-static int route_delete(struct tcf_proto *tp, unsigned long arg)
+static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
- return -EINVAL;
+ struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_filter **fp, *f = (struct route4_filter*)arg;
+ unsigned h = f->handle;
+ struct route4_bucket *b;
+ int i;
+
+ if (!head || !f)
+ return -EINVAL;
+
+ b = f->bkt;
+
+ for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+ if (*fp == f) {
+ unsigned long cl;
+
+ *fp = f->next;
+ synchronize_bh();
+
+ route4_reset_fastmap(head, f->id);
+
+ if ((cl = cls_set_class(&f->res.class, 0)) != 0)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+
+#ifdef CONFIG_NET_CLS_POLICE
+ tcf_police_release(f->police);
+#endif
+ kfree(f);
+
+ /* Strip tree */
+
+ for (i=0; i<=32; i++)
+ if (b->ht[i])
+ return 0;
+
+ /* OK, session has no flows */
+ head->table[to_hash(h)] = NULL;
+ synchronize_bh();
+
+ kfree(b);
+ return 0;
+ }
+ }
+ return 0;
}
-static int route_change(struct tcf_proto *tp, u32 handle,
- struct rtattr **tca,
- unsigned long *arg)
+static int route4_change(struct tcf_proto *tp, unsigned long base,
+ u32 handle,
+ struct rtattr **tca,
+ unsigned long *arg)
{
- return handle ? -EINVAL : 0;
+ struct route4_head *head = tp->root;
+ struct route4_filter *f, *f1, **ins_f;
+ struct route4_bucket *b;
+ struct rtattr *opt = tca[TCA_OPTIONS-1];
+ struct rtattr *tb[TCA_ROUTE4_MAX];
+ unsigned h1, h2;
+ int err;
+
+ if (opt == NULL)
+ return handle ? -EINVAL : 0;
+
+ if (rtattr_parse(tb, TCA_ROUTE4_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
+ return -EINVAL;
+
+ if ((f = (struct route4_filter*)*arg) != NULL) {
+ /* Node exists: adjust only classid */
+
+ if (f->handle != handle && handle)
+ return -EINVAL;
+ if (tb[TCA_ROUTE4_CLASSID-1]) {
+ unsigned long cl;
+
+ f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
+ cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+ if (cl)
+ tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
+ }
+#ifdef CONFIG_NET_CLS_POLICE
+ if (tb[TCA_ROUTE4_POLICE-1]) {
+ struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
+
+ police = xchg(&f->police, police);
+ synchronize_bh();
+
+ tcf_police_release(police);
+ }
+#endif
+ return 0;
+ }
+
+ /* Now more serious part... */
+
+ if (head == NULL) {
+ head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
+ if (head == NULL)
+ return -ENOBUFS;
+ memset(head, 0, sizeof(struct route4_head));
+
+ tp->root = head;
+ synchronize_bh();
+ }
+
+ f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
+ if (f == NULL)
+ return -ENOBUFS;
+
+ memset(f, 0, sizeof(*f));
+
+ err = -EINVAL;
+ f->handle = 0x8000;
+ if (tb[TCA_ROUTE4_TO-1]) {
+ if (handle&0x8000)
+ goto errout;
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < 4)
+ goto errout;
+ f->id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
+ if (f->id > 0xFF)
+ goto errout;
+ f->handle = f->id;
+ }
+ if (tb[TCA_ROUTE4_FROM-1]) {
+ u32 sid;
+ if (tb[TCA_ROUTE4_IIF-1])
+ goto errout;
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < 4)
+ goto errout;
+ sid = (*(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]));
+ if (sid > 0xFF)
+ goto errout;
+ f->handle |= sid<<16;
+ f->id |= sid<<16;
+ } else if (tb[TCA_ROUTE4_IIF-1]) {
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < 4)
+ goto errout;
+ f->iif = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
+ if (f->iif > 0x7FFF)
+ goto errout;
+ f->handle |= (f->iif|0x8000)<<16;
+ } else
+ f->handle |= 0xFFFF<<16;
+
+ if (handle) {
+ f->handle |= handle&0x7F00;
+ if (f->handle != handle)
+ goto errout;
+ }
+
+ if (tb[TCA_ROUTE4_CLASSID-1]) {
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < 4)
+ goto errout;
+ f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
+ }
+
+ h1 = to_hash(f->handle);
+ if ((b = head->table[h1]) == NULL) {
+ err = -ENOBUFS;
+ b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
+ if (b == NULL)
+ goto errout;
+ memset(b, 0, sizeof(*b));
+
+ head->table[h1] = b;
+ synchronize_bh();
+ }
+ f->bkt = b;
+
+ err = -EEXIST;
+ h2 = from_hash(f->handle>>16);
+ for (ins_f = &b->ht[h2]; (f1=*ins_f) != NULL; ins_f = &f1->next) {
+ if (f->handle < f1->handle)
+ break;
+ if (f1->handle == f->handle)
+ goto errout;
+ }
+
+ cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
+#ifdef CONFIG_NET_CLS_POLICE
+ if (tb[TCA_ROUTE4_POLICE-1])
+ f->police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
+#endif
+
+ f->next = f1;
+ wmb();
+ *ins_f = f;
+
+ route4_reset_fastmap(head, f->id);
+ *arg = (unsigned long)f;
+ return 0;
+
+errout:
+ if (f)
+ kfree(f);
+ return err;
+}
+
+static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+{
+ struct route4_head *head = tp->root;
+ unsigned h, h1;
+
+ if (head == NULL)
+ arg->stop = 1;
+
+ if (arg->stop)
+ return;
+
+ for (h = 0; h <= 256; h++) {
+ struct route4_bucket *b = head->table[h];
+
+ if (b) {
+ for (h1 = 0; h1 <= 32; h1++) {
+ struct route4_filter *f;
+
+ for (f = b->ht[h1]; f; f = f->next) {
+ if (arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(tp, (unsigned long)f, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+ }
+ }
+ }
}
-struct tcf_proto_ops cls_route_ops = {
+#ifdef CONFIG_RTNETLINK
+static int route4_dump(struct tcf_proto *tp, unsigned long fh,
+ struct sk_buff *skb, struct tcmsg *t)
+{
+ struct route4_filter *f = (struct route4_filter*)fh;
+ unsigned char *b = skb->tail;
+ struct rtattr *rta;
+ u32 id;
+
+ if (f == NULL)
+ return skb->len;
+
+ t->tcm_handle = f->handle;
+
+ rta = (struct rtattr*)b;
+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+
+ if (!(f->handle&0x8000)) {
+ id = f->id&0xFF;
+ RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
+ }
+ if (f->handle&0x80000000) {
+ if ((f->handle>>16) != 0xFFFF)
+ RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
+ } else {
+ id = f->id>>16;
+ RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
+ }
+ if (f->res.classid)
+ RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
+#ifdef CONFIG_NET_CLS_POLICE
+ if (f->police) {
+ struct rtattr * p_rta = (struct rtattr*)skb->tail;
+
+ RTA_PUT(skb, TCA_ROUTE4_POLICE, 0, NULL);
+
+ if (tcf_police_dump(skb, f->police) < 0)
+ goto rtattr_failure;
+
+ p_rta->rta_len = skb->tail - (u8*)p_rta;
+ }
+#endif
+
+ rta->rta_len = skb->tail - b;
+#ifdef CONFIG_NET_CLS_POLICE
+ if (f->police) {
+ RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ }
+#endif
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+#endif
+
+struct tcf_proto_ops cls_route4_ops = {
NULL,
"route",
- route_classify,
- route_init,
- route_destroy,
-
- route_get,
- route_put,
- route_change,
- route_delete,
- NULL,
+ route4_classify,
+ route4_init,
+ route4_destroy,
+
+ route4_get,
+ route4_put,
+ route4_change,
+ route4_delete,
+ route4_walk,
+#ifdef CONFIG_RTNETLINK
+ route4_dump
+#else
+ NULL
+#endif
};
+
+#ifdef MODULE
+int init_module(void)
+{
+ return register_tcf_proto_ops(&cls_route4_ops);
+}
+
+void cleanup_module(void)
+{
+ unregister_tcf_proto_ops(&cls_route4_ops);
+}
+#endif
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 4168f541f..48142c6e7 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -120,6 +120,18 @@ static __inline__ unsigned hash_src(u32 *src)
return h & 0xF;
}
+#ifdef CONFIG_NET_CLS_POLICE
+#define RSVP_POLICE() \
+if (f->police) { \
+ int pol_res = tcf_police(skb, f->police); \
+ if (pol_res < 0) continue; \
+ if (pol_res) return pol_res; \
+}
+#else
+#define RSVP_POLICE()
+#endif
+
+
static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
@@ -137,7 +149,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct iphdr *nhptr = skb->nh.iph;
#endif
-#ifndef __i386__
+#if !defined( __i386__) && !defined(__mc68000__)
if ((unsigned long)nhptr & 3)
return -1;
#endif
@@ -181,25 +193,26 @@ restart:
&& src[2] == f->src[2]
#endif
) {
+ *res = f->res;
+
+ RSVP_POLICE();
+
matched:
- if (f->tunnelhdr == 0) {
- *res = f->res;
-#ifdef CONFIG_NET_CLS_POLICE
- if (f->police)
- return tcf_police(skb, f->police);
-#endif
+ if (f->tunnelhdr == 0)
return 0;
- } else {
- tunnelid = f->res.classid;
- nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
- goto restart;
- }
+
+ tunnelid = f->res.classid;
+ nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+ goto restart;
}
}
/* And wildcard bucket... */
- if ((f = s->ht[16]) != NULL)
+ for (f = s->ht[16]; f; f = f->next) {
+ *res = f->res;
+ RSVP_POLICE();
goto matched;
+ }
return -1;
}
}
@@ -260,7 +273,6 @@ static void rsvp_destroy(struct tcf_proto *tp)
struct rsvp_session *s;
while ((s = sht[h1]) != NULL) {
-
sht[h1] = s->next;
for (h2=0; h2<=16; h2++) {
@@ -270,7 +282,7 @@ static void rsvp_destroy(struct tcf_proto *tp)
unsigned long cl;
s->ht[h2] = f->next;
- if ((cl = xchg(&f->res.class, 0)) != 0)
+ if ((cl = cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(f->police);
@@ -297,8 +309,11 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
if (*fp == f) {
unsigned long cl;
+
*fp = f->next;
- if ((cl = xchg(&f->res.class, 0)) != 0)
+ synchronize_bh();
+
+ if ((cl = cls_set_class(&f->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
@@ -318,11 +333,13 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
*sp; sp = &(*sp)->next) {
if (*sp == s) {
*sp = s->next;
+ synchronize_bh();
+
kfree(s);
return 0;
}
}
-
+
return 0;
}
}
@@ -399,7 +416,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
return 0;
}
-static int rsvp_change(struct tcf_proto *tp, u32 handle,
+static int rsvp_change(struct tcf_proto *tp, unsigned long base,
+ u32 handle,
struct rtattr **tca,
unsigned long *arg)
{
@@ -425,17 +443,21 @@ static int rsvp_change(struct tcf_proto *tp, u32 handle,
if (f->handle != handle && handle)
return -EINVAL;
if (tb[TCA_RSVP_CLASSID-1]) {
- unsigned long cl = xchg(&f->res.class, 0);
+ unsigned long cl;
+
+ f->res.classid = *(u32*)RTA_DATA(tb[TCA_RSVP_CLASSID-1]);
+ cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
if (cl)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
- f->res.classid = *(u32*)RTA_DATA(tb[TCA_RSVP_CLASSID-1]);
- f->res.class = tp->q->ops->cl_ops->bind_tcf(tp->q, f->res.classid);
}
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_RSVP_POLICE-1]) {
- struct tcf_police *police = tcf_police_locate(tb[TCA_RSVP_POLICE-1]);
+ struct tcf_police *police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
- tcf_police_release(xchg(&f->police, police));
+ police = xchg(&f->police, police);
+ synchronize_bh();
+
+ tcf_police_release(police);
}
#endif
return 0;
@@ -514,17 +536,19 @@ insert:
f->sess = s;
if (f->tunnelhdr == 0)
- f->res.class = tp->q->ops->cl_ops->bind_tcf(tp->q, f->res.classid);
+ cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_RSVP_POLICE-1])
- f->police = tcf_police_locate(tb[TCA_RSVP_POLICE-1]);
+ f->police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
#endif
for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
break;
f->next = *fp;
+ wmb();
*fp = f;
+
*arg = (unsigned long)f;
return 0;
}
@@ -546,7 +570,9 @@ insert:
break;
}
s->next = *sp;
+ wmb();
*sp = s;
+
goto insert;
errout:
@@ -631,6 +657,11 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
#endif
rta->rta_len = skb->tail - b;
+#ifdef CONFIG_NET_CLS_POLICE
+ if (f->police) {
+ RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
+ }
+#endif
return skb->len;
rtattr_failure:
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index cb52e9d07..98d4e1f7b 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -114,7 +114,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
int sel = 0;
int i;
-#ifndef __i386__
+#if !defined(__i386__) && !defined(__mc68000__)
if ((unsigned long)ptr & 3)
return -1;
#endif
@@ -137,10 +137,13 @@ check_terminal:
if (n->sel.flags&TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_POLICE
- if (n->police)
- return tcf_police(skb, n->police);
+ if (n->police) {
+ int pol_res = tcf_police(skb, n->police);
+ if (pol_res >= 0)
+ return pol_res;
+ } else
#endif
- return 0;
+ return 0;
}
n = n->next;
goto next_knode;
@@ -304,7 +307,7 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
unsigned long cl;
- if ((cl = xchg(&n->res.class, 0)) != 0)
+ if ((cl = cls_set_class(&n->res.class, 0)) != 0)
tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
tcf_police_release(n->police);
@@ -324,6 +327,8 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
if (*kp == key) {
*kp = key->next;
+ synchronize_bh();
+
u32_destroy_key(tp, key);
return 0;
}
@@ -341,6 +346,8 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
for (h=0; h<=ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next;
+ synchronize_bh();
+
u32_destroy_key(tp, n);
}
}
@@ -402,6 +409,7 @@ static void u32_destroy(struct tcf_proto *tp)
kfree(tp_c);
}
+ MOD_DEC_USE_COUNT;
tp->data = NULL;
}
@@ -437,8 +445,10 @@ static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
return handle|(i>0xFFF ? 0xFFF : i);
}
-static int u32_set_parms(struct Qdisc *q, struct tc_u_hnode *ht,
- struct tc_u_knode *n, struct rtattr **tb)
+static int u32_set_parms(struct Qdisc *q, unsigned long base,
+ struct tc_u_hnode *ht,
+ struct tc_u_knode *n, struct rtattr **tb,
+ struct rtattr *est)
{
if (tb[TCA_U32_LINK-1]) {
u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
@@ -456,28 +466,33 @@ static int u32_set_parms(struct Qdisc *q, struct tc_u_hnode *ht,
}
ht_down = xchg(&n->ht_down, ht_down);
+ synchronize_bh();
if (ht_down)
ht_down->refcnt--;
}
if (tb[TCA_U32_CLASSID-1]) {
- unsigned long cl = xchg(&n->res.class, 0);
+ unsigned long cl;
+
+ n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
+ cl = cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
if (cl)
q->ops->cl_ops->unbind_tcf(q, cl);
- n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
- n->res.class = q->ops->cl_ops->bind_tcf(q, n->res.classid);
}
#ifdef CONFIG_NET_CLS_POLICE
if (tb[TCA_U32_POLICE-1]) {
- struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1]);
+ struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
+
+ police = xchg(&n->police, police);
+ synchronize_bh();
- tcf_police_release(xchg(&n->police, police));
+ tcf_police_release(police);
}
#endif
return 0;
}
-static int u32_change(struct tcf_proto *tp, u32 handle,
+static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
struct rtattr **tca,
unsigned long *arg)
{
@@ -500,7 +515,7 @@ static int u32_change(struct tcf_proto *tp, u32 handle,
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
- return u32_set_parms(tp->q, n->ht_up, n, tb);
+ return u32_set_parms(tp->q, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
}
if (tb[TCA_U32_DIVISOR-1]) {
@@ -531,7 +546,7 @@ static int u32_change(struct tcf_proto *tp, u32 handle,
if (tb[TCA_U32_HASH-1]) {
htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
- if (TC_U32_HTID(handle) == TC_U32_ROOT) {
+ if (TC_U32_HTID(htid) == TC_U32_ROOT) {
ht = tp->root;
htid = ht->handle;
} else {
@@ -550,8 +565,6 @@ static int u32_change(struct tcf_proto *tp, u32 handle,
if (handle) {
if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
return -EINVAL;
- if (TC_U32_HASH(handle) && TC_U32_HASH(handle^htid))
- return -EINVAL;
handle = htid | TC_U32_NODE(handle);
} else
handle = gen_new_kid(ht, htid);
@@ -568,14 +581,17 @@ static int u32_change(struct tcf_proto *tp, u32 handle,
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
n->ht_up = ht;
n->handle = handle;
- err = u32_set_parms(tp->q, ht, n, tb);
+ err = u32_set_parms(tp->q, base, ht, n, tb, tca[TCA_RATE-1]);
if (err == 0) {
struct tc_u_knode **ins;
for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
- if (TC_U32_NODE(handle) >= TC_U32_NODE((*ins)->handle))
+ if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
break;
+
n->next = *ins;
+ wmb();
*ins = n;
+
*arg = (unsigned long)n;
return 0;
}
@@ -664,6 +680,11 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
}
rta->rta_len = skb->tail - b;
+#ifdef CONFIG_NET_CLS_POLICE
+ if (TC_U32_KEY(n->handle) && n->police) {
+ RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &n->police->stats);
+ }
+#endif
return skb->len;
rtattr_failure:
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
index 463879606..d51017c84 100644
--- a/net/sched/estimator.c
+++ b/net/sched/estimator.c
@@ -171,8 +171,10 @@ void qdisc_kill_estimator(struct tc_stats *stats)
pest = &est->next;
continue;
}
- /* ATOMIC_SET */
+
*pest = est->next;
+ synchronize_bh();
+
kfree(est);
killed++;
}
diff --git a/net/sched/police.c b/net/sched/police.c
index 13599ac49..89e58d8be 100644
--- a/net/sched/police.c
+++ b/net/sched/police.c
@@ -74,6 +74,9 @@ void tcf_police_destroy(struct tcf_police *p)
for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
if (*p1p == p) {
*p1p = p->next;
+#ifdef CONFIG_NET_ESTIMATOR
+ qdisc_kill_estimator(&p->stats);
+#endif
if (p->R_tab)
qdisc_put_rtab(p->R_tab);
if (p->P_tab)
@@ -85,7 +88,7 @@ void tcf_police_destroy(struct tcf_police *p)
BUG_TRAP(0);
}
-struct tcf_police * tcf_police_locate(struct rtattr *rta)
+struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
{
unsigned h;
struct tcf_police *p;
@@ -111,20 +114,35 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta)
memset(p, 0, sizeof(*p));
p->refcnt = 1;
- if ((p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1])) == NULL)
- goto failure;
- if (parm->peakrate.rate &&
- (p->P_tab = qdisc_get_rtab(&parm->peakrate, tb[TCA_POLICE_PEAKRATE-1])) == NULL)
- goto failure;
+ if (parm->rate.rate) {
+ if ((p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1])) == NULL)
+ goto failure;
+ if (parm->peakrate.rate &&
+ (p->P_tab = qdisc_get_rtab(&parm->peakrate, tb[TCA_POLICE_PEAKRATE-1])) == NULL)
+ goto failure;
+ }
+ if (tb[TCA_POLICE_RESULT-1])
+ p->result = *(int*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
+#ifdef CONFIG_NET_ESTIMATOR
+ if (tb[TCA_POLICE_AVRATE-1])
+ p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
+#endif
p->toks = p->burst = parm->burst;
p->mtu = parm->mtu;
- if (p->mtu == 0)
- p->mtu = 255<<p->R_tab->rate.cell_log;
+ if (p->mtu == 0) {
+ p->mtu = ~0;
+ if (p->R_tab)
+ p->mtu = 255<<p->R_tab->rate.cell_log;
+ }
if (p->P_tab)
p->ptoks = L2T_P(p, p->mtu);
PSCHED_GET_TIME(p->t_c);
p->index = parm->index ? : tcf_police_new_index();
p->action = parm->action;
+#ifdef CONFIG_NET_ESTIMATOR
+ if (est)
+ qdisc_new_estimator(&p->stats, est);
+#endif
h = tcf_police_hash(p->index);
p->next = tcf_police_ht[h];
tcf_police_ht[h] = p;
@@ -143,7 +161,20 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
long toks;
long ptoks = 0;
+ p->stats.bytes += skb->len;
+ p->stats.packets++;
+
+#ifdef CONFIG_NET_ESTIMATOR
+ if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
+ p->stats.overlimits++;
+ return p->action;
+ }
+#endif
+
if (skb->len <= p->mtu) {
+ if (p->R_tab == NULL)
+ return p->result;
+
PSCHED_GET_TIME(now);
toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst, 0);
@@ -163,10 +194,11 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
p->t_c = now;
p->toks = toks;
p->ptoks = ptoks;
- return TC_POLICE_OK;
+ return p->result;
}
}
+ p->stats.overlimits++;
return p->action;
}
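
Editor's note: the hunk above turns the policer into a classic token bucket whose conform/exceed decision also feeds the new byte/packet/overlimit counters. A user-space sketch of that decision, not part of the patch, with the kernel's rate-table time units simplified to plain bytes and microseconds (all names illustrative):

#include <stdio.h>

struct toy_policer {
	double rate;    /* bytes per microsecond */
	double burst;   /* bucket depth, bytes */
	double toks;    /* tokens currently in the bucket, bytes */
	double t_c;     /* time of the last update, microseconds */
};

static int toy_police(struct toy_policer *p, unsigned len, double now)
{
	double toks = p->toks + (now - p->t_c) * p->rate;

	if (toks > p->burst)
		toks = p->burst;          /* accumulation is capped at the burst */
	if (toks >= len) {
		p->t_c = now;
		p->toks = toks - len;     /* conforming: charge the packet */
		return 0;                 /* like returning p->result */
	}
	return 1;                         /* overlimit: like returning p->action */
}

int main(void)
{
	struct toy_policer p = { 0.125, 1500.0, 1500.0, 0.0 };   /* ~1 Mbit/s */
	double t[5] = { 0, 8000, 9000, 16000, 17000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("t=%6.0fus len=1000 -> %s\n", t[i],
		       toy_police(&p, 1000, t[i]) ? "overlimit" : "ok");
	return 0;
}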
@@ -180,12 +212,21 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
opt.action = p->action;
opt.mtu = p->mtu;
opt.burst = p->burst;
- opt.rate = p->R_tab->rate;
+ if (p->R_tab)
+ opt.rate = p->R_tab->rate;
+ else
+ memset(&opt.rate, 0, sizeof(opt.rate));
if (p->P_tab)
opt.peakrate = p->P_tab->rate;
else
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
+ if (p->result)
+ RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
+#ifdef CONFIG_NET_ESTIMATOR
+ if (p->ewma_rate)
+ RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
+#endif
return skb->len;
rtattr_failure:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f16638081..0ced70bbc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -11,6 +11,7 @@
* Fixes:
*
* Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
+ * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
*/
#include <linux/config.h>
@@ -29,6 +30,7 @@
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
+#include <linux/kmod.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
@@ -41,7 +43,7 @@
#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
#ifdef CONFIG_RTNETLINK
-static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
+static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
struct Qdisc *q, unsigned long cl, int event);
@@ -116,6 +118,10 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
---destroy
destroys resources allocated by init and during lifetime of qdisc.
+
+ ---change
+
+ changes qdisc parameters.
*/
/************************************************
@@ -177,22 +183,22 @@ struct Qdisc *qdisc_lookup(struct device *dev, u32 handle)
return NULL;
}
-/* We know classid. Find qdisc among all qdisc's attached to device
- (root qdisc, all its children, children of children etc.)
- */
-
-struct Qdisc *qdisc_lookup_class(struct device *dev, u32 classid)
+struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
- struct Qdisc *q;
+ unsigned long cl;
+ struct Qdisc *leaf;
+ struct Qdisc_class_ops *cops = p->ops->cl_ops;
- for (q = dev->qdisc_list; q; q = q->next) {
- if (q->classid == classid)
- return q;
- }
- return NULL;
+ if (cops == NULL)
+ return NULL;
+ cl = cops->get(p, classid);
+ if (cl == 0)
+ return NULL;
+ leaf = cops->leaf(p, cl);
+ cops->put(p, cl);
+ return leaf;
}
-
/* Find queueing discipline by name */
struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
@@ -268,6 +274,37 @@ u32 qdisc_alloc_handle(struct device *dev)
return i>0 ? autohandle : 0;
}
+/* Attach toplevel qdisc to device dev */
+
+static struct Qdisc *
+dev_graft_qdisc(struct device *dev, struct Qdisc *qdisc)
+{
+ struct Qdisc *oqdisc;
+
+ if (dev->flags & IFF_UP)
+ dev_deactivate(dev);
+
+ start_bh_atomic();
+ oqdisc = dev->qdisc_sleeping;
+
+ /* Prune old scheduler */
+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+ qdisc_reset(oqdisc);
+
+ /* ... and graft new one */
+ if (qdisc == NULL)
+ qdisc = &noop_qdisc;
+ dev->qdisc_sleeping = qdisc;
+ dev->qdisc = &noop_qdisc;
+ end_bh_atomic();
+
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+
+ return oqdisc;
+}
+
+
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
to device "dev".
@@ -280,17 +317,10 @@ int qdisc_graft(struct device *dev, struct Qdisc *parent, u32 classid,
int err = 0;
if (parent == NULL) {
- BUG_TRAP(classid == TC_H_ROOT);
- if (new) {
- new->parent = NULL;
- new->classid = TC_H_ROOT;
- }
- *old = dev_set_scheduler(dev, new);
+ *old = dev_graft_qdisc(dev, new);
} else {
struct Qdisc_class_ops *cops = parent->ops->cl_ops;
- BUG_TRAP(classid != TC_H_ROOT);
-
err = -EINVAL;
if (cops) {
@@ -313,22 +343,30 @@ int qdisc_graft(struct device *dev, struct Qdisc *parent, u32 classid,
*/
static struct Qdisc *
-qdisc_create(struct device *dev, struct Qdisc_ops *ops, u32 handle,
- u32 parentid, struct rtattr **tca, int *errp)
+qdisc_create(struct device *dev, u32 handle, struct rtattr **tca, int *errp)
{
int err;
struct rtattr *kind = tca[TCA_KIND-1];
struct Qdisc *sch = NULL;
+ struct Qdisc_ops *ops;
int size;
- int new = 0;
- if (ops == NULL) {
- ops = qdisc_lookup_ops(kind);
- err = -EINVAL;
- if (ops == NULL)
- goto err_out;
- new = 1;
+ ops = qdisc_lookup_ops(kind);
+#ifdef CONFIG_KMOD
+ if (ops==NULL && tca[TCA_KIND-1] != NULL) {
+ char module_name[4 + IFNAMSIZ + 1];
+
+ if (RTA_PAYLOAD(kind) <= IFNAMSIZ) {
+ sprintf(module_name, "sch_%s", (char*)RTA_DATA(kind));
+ request_module (module_name);
+ ops = qdisc_lookup_ops(kind);
+ }
}
+#endif
+
+ err = -EINVAL;
+ if (ops == NULL)
+ goto err_out;
size = sizeof(*sch) + ops->priv_size;
@@ -340,13 +378,8 @@ qdisc_create(struct device *dev, struct Qdisc_ops *ops, u32 handle,
/* Grrr... Resolve race condition with module unload */
err = -EINVAL;
- if (new) {
- if (ops != qdisc_lookup_ops(kind))
- goto err_out;
- } else if (kind) {
- if (rtattr_strcmp(kind, ops->id))
- goto err_out;
- }
+ if (ops != qdisc_lookup_ops(kind))
+ goto err_out;
memset(sch, 0, size);
@@ -355,6 +388,7 @@ qdisc_create(struct device *dev, struct Qdisc_ops *ops, u32 handle,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
+ atomic_set(&sch->refcnt, 1);
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
err = -ENOMEM;
@@ -362,9 +396,8 @@ qdisc_create(struct device *dev, struct Qdisc_ops *ops, u32 handle,
goto err_out;
}
sch->handle = handle;
- sch->classid = parentid;
- if (ops->init && (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
+ if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
sch->next = dev->qdisc_list;
dev->qdisc_list = sch;
#ifdef CONFIG_NET_ESTIMATOR
@@ -381,135 +414,241 @@ err_out:
return NULL;
}
+static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
+{
+ if (tca[TCA_OPTIONS-1]) {
+ int err;
+
+ if (sch->ops->change == NULL)
+ return -EINVAL;
+ err = sch->ops->change(sch, tca[TCA_OPTIONS-1]);
+ if (err)
+ return err;
+ }
+#ifdef CONFIG_NET_ESTIMATOR
+ if (tca[TCA_RATE-1]) {
+ qdisc_kill_estimator(&sch->stats);
+ qdisc_new_estimator(&sch->stats, tca[TCA_RATE-1]);
+ }
+#endif
+ return 0;
+}
+
+struct check_loop_arg
+{
+ struct qdisc_walker w;
+ struct Qdisc *p;
+ int depth;
+};
+
+static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
+
+static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
+{
+ struct check_loop_arg arg;
+
+ if (q->ops->cl_ops == NULL)
+ return 0;
+
+ arg.w.stop = arg.w.skip = arg.w.count = 0;
+ arg.w.fn = check_loop_fn;
+ arg.depth = depth;
+ arg.p = p;
+ q->ops->cl_ops->walk(q, &arg.w);
+ return arg.w.stop ? -ELOOP : 0;
+}
+
+static int
+check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
+{
+ struct Qdisc *leaf;
+ struct Qdisc_class_ops *cops = q->ops->cl_ops;
+ struct check_loop_arg *arg = (struct check_loop_arg *)w;
+
+ leaf = cops->leaf(q, cl);
+ if (leaf) {
+ if (leaf == arg->p || arg->depth > 7)
+ return -ELOOP;
+ return check_loop(leaf, arg->p, arg->depth + 1);
+ }
+ return 0;
+}
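
Editor's note: check_loop()/check_loop_fn() above guard the new graft path against attaching a qdisc underneath its own subtree, which would make dequeue recurse forever. A stand-alone sketch of the same reachability test on a toy tree, not part of the patch (all names illustrative):

#include <stdio.h>

struct toy_qdisc {
	const char *name;
	struct toy_qdisc *leaf[4];   /* qdiscs attached to this one's classes */
	int nleaf;
};

/* Refuse to graft q under p if p is reachable from q (or nesting is absurd). */
static int toy_check_loop(struct toy_qdisc *q, struct toy_qdisc *p, int depth)
{
	int i;

	for (i = 0; i < q->nleaf; i++) {
		struct toy_qdisc *leaf = q->leaf[i];

		if (leaf == p || depth > 7)
			return -1;                        /* -ELOOP */
		if (toy_check_loop(leaf, p, depth + 1) < 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_qdisc a = { "a" }, b = { "b" }, c = { "c" };

	a.leaf[a.nleaf++] = &b;          /* a -> b -> c */
	b.leaf[b.nleaf++] = &c;

	printf("graft a under c -> %s\n",
	       toy_check_loop(&a, &c, 0) < 0 ? "ELOOP" : "ok");
	printf("graft c under a -> %s\n",
	       toy_check_loop(&c, &a, 0) < 0 ? "ELOOP" : "ok");
	return 0;
}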
/*
- Create/delete/change/get qdisc.
+ * Delete/get qdisc.
*/
-static int tc_ctl_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
+static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
struct tcmsg *tcm = NLMSG_DATA(n);
struct rtattr **tca = arg;
struct device *dev;
u32 clid = tcm->tcm_parent;
- struct Qdisc *old_q;
struct Qdisc *q = NULL;
struct Qdisc *p = NULL;
- struct Qdisc *leaf = NULL;
- struct Qdisc_ops *qops = NULL;
int err;
- /* Find device */
if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
return -ENODEV;
- /* If parent is specified, it must exist
- and tcm_parent selects a class in parent which
- new qdisc will be attached to.
-
- The place may be already busy by another qdisc,
- remember this fact, if it was not auto-created discipline.
- */
if (clid) {
if (clid != TC_H_ROOT) {
- p = qdisc_lookup(dev, TC_H_MAJ(clid));
- if (p == NULL)
+ if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
return -ENOENT;
- leaf = qdisc_lookup_class(dev, clid);
+ q = qdisc_leaf(p, clid);
} else
- leaf = dev->qdisc_sleeping;
-
- if (leaf && leaf->flags&TCQ_F_DEFAULT && n->nlmsg_type == RTM_NEWQDISC)
- leaf = NULL;
+ q = dev->qdisc_sleeping;
- /*
- Also, leaf may be exactly that qdisc, which we want
- to control. Remember this to avoid one more qdisc_lookup.
- */
-
- if (leaf && leaf->handle == tcm->tcm_handle)
- q = leaf;
- }
+ if (!q)
+ return -ENOENT;
- /* Try to locate the discipline */
- if (tcm->tcm_handle && q == NULL) {
- if (TC_H_MIN(tcm->tcm_handle))
+ if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
return -EINVAL;
- q = qdisc_lookup(dev, tcm->tcm_handle);
+ } else {
+ if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ return -ENOENT;
}
- /* If discipline already exists, check that its real parent
- matches to one selected by tcm_parent.
- */
-
- if (q) {
- if (clid && p != q->parent)
- return -EINVAL;
- BUG_TRAP(!leaf || leaf == q);
- if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
+ if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
+ return -EINVAL;
+
+ if (n->nlmsg_type == RTM_DELQDISC) {
+ if (!clid)
return -EINVAL;
- clid = q->classid;
- goto process_existing;
+ if (q->handle == 0)
+ return -ENOENT;
+ if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
+ return err;
+ if (q) {
+ qdisc_notify(skb, n, clid, q, NULL);
+ qdisc_destroy(q);
+ }
+ } else {
+ qdisc_notify(skb, n, clid, NULL, q);
}
+ return 0;
+}
- /* The discipline is known not to exist.
- If parent was not selected too, return error.
- */
- if (clid == 0)
- return tcm->tcm_handle ? -ENOENT : -EINVAL;
+/*
+ Create/change qdisc.
+ */
- /* Check for the case when leaf is exactly the thing,
- that you want.
- */
+static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
+{
+ struct tcmsg *tcm = NLMSG_DATA(n);
+ struct rtattr **tca = arg;
+ struct device *dev;
+ u32 clid = tcm->tcm_parent;
+ struct Qdisc *q = NULL;
+ struct Qdisc *p = NULL;
+ int err;
+
+ if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
+ return -ENODEV;
- if (leaf && tcm->tcm_handle == 0) {
- q = leaf;
- if (!tca[TCA_KIND-1] || rtattr_strcmp(tca[TCA_KIND-1], q->ops->id) == 0)
- goto process_existing;
+ if (clid) {
+ if (clid != TC_H_ROOT) {
+ if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ return -ENOENT;
+ q = qdisc_leaf(p, clid);
+ } else {
+ q = dev->qdisc_sleeping;
+ }
+
+ /* It may be default qdisc, ignore it */
+ if (q && q->handle == 0)
+ q = NULL;
+
+ if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
+ if (tcm->tcm_handle) {
+ if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
+ return -EEXIST;
+ if (TC_H_MIN(tcm->tcm_handle))
+ return -EINVAL;
+ if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ goto create_n_graft;
+ if (n->nlmsg_flags&NLM_F_EXCL)
+ return -EEXIST;
+ if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
+ return -EINVAL;
+ if (q == p ||
+ (p && check_loop(q, p, 0)))
+ return -ELOOP;
+ atomic_inc(&q->refcnt);
+ goto graft;
+ } else {
+ if (q == NULL)
+ goto create_n_graft;
+
+ /* This magic test requires explanation.
+ *
+ * We know that some child qdisc is already
+ * attached to this parent and we have a choice:
+ * either change it or create/graft a new one.
+ *
+ * 1. We are allowed to create/graft only
+ * if CREATE and REPLACE flags are set.
+ *
+ * 2. If EXCL is set, the requestor meant that
+ * the qdisc tcm_handle is not expected
+ * to exist, so we choose create/graft too.
+ *
+ * 3. The last case is when no flags are set.
+ * Alas, this is a hole in the API; we
+ * cannot decide what to do unambiguously.
+ * For now we select create/graft if the
+ * user gave a KIND which does not match the existing one.
+ */
+ if ((n->nlmsg_flags&NLM_F_CREATE) &&
+ (n->nlmsg_flags&NLM_F_REPLACE) &&
+ ((n->nlmsg_flags&NLM_F_EXCL) ||
+ (tca[TCA_KIND-1] &&
+ rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))))
+ goto create_n_graft;
+ }
+ }
+ } else {
+ if (!tcm->tcm_handle)
+ return -EINVAL;
+ q = qdisc_lookup(dev, tcm->tcm_handle);
}
- if (n->nlmsg_type != RTM_NEWQDISC || !(n->nlmsg_flags&NLM_F_CREATE))
+ /* Change qdisc parameters */
+ if (q == NULL)
return -ENOENT;
- if (leaf && n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags&NLM_F_EXCL)
return -EEXIST;
+ if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
+ return -EINVAL;
+ err = qdisc_change(q, tca);
+ if (err == 0)
+ qdisc_notify(skb, n, clid, NULL, q);
+ return err;
-create_and_graft:
- q = qdisc_create(dev, qops, tcm->tcm_handle, clid, tca, &err);
+create_n_graft:
+ if (!(n->nlmsg_flags&NLM_F_CREATE))
+ return -ENOENT;
+ q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
if (q == NULL)
return err;
graft:
- err = qdisc_graft(dev, p, clid, q, &old_q);
- if (err) {
- if (q)
- qdisc_destroy(q);
- return err;
+ if (1) {
+ struct Qdisc *old_q = NULL;
+ err = qdisc_graft(dev, p, clid, q, &old_q);
+ if (err) {
+ if (q)
+ qdisc_destroy(q);
+ return err;
+ }
+ qdisc_notify(skb, n, clid, old_q, q);
+ if (old_q)
+ qdisc_destroy(old_q);
}
- qdisc_notify(skb, n, old_q, q);
- if (old_q)
- qdisc_destroy(old_q);
return 0;
-
-process_existing:
-
- switch (n->nlmsg_type) {
- case RTM_NEWQDISC:
- if (n->nlmsg_flags&NLM_F_EXCL)
- return -EEXIST;
- qops = q->ops;
- goto create_and_graft;
- case RTM_GETQDISC:
- qdisc_notify(skb, n, NULL, q);
- return 0;
- case RTM_DELQDISC:
- q = NULL;
- goto graft;
- default:
- return -EINVAL;
- }
}
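
Editor's note: the "magic test" commented above boils down to one predicate over the netlink flags. A tiny stand-alone illustration of that decision, not part of the patch (flag values passed as plain booleans for brevity):

#include <stdio.h>

/* 1 -> create and graft a new qdisc, 0 -> change the attached one in place. */
static int toy_create_or_change(int f_create, int f_replace, int f_excl,
                                int kind_differs)
{
	return f_create && f_replace && (f_excl || kind_differs);
}

int main(void)
{
	printf("CREATE|REPLACE|EXCL          -> %s\n",
	       toy_create_or_change(1, 1, 1, 0) ? "create/graft" : "change");
	printf("CREATE|REPLACE, same kind    -> %s\n",
	       toy_create_or_change(1, 1, 0, 0) ? "create/graft" : "change");
	printf("CREATE|REPLACE, kind differs -> %s\n",
	       toy_create_or_change(1, 1, 0, 1) ? "create/graft" : "change");
	printf("no flags                     -> %s\n",
	       toy_create_or_change(0, 0, 0, 1) ? "create/graft" : "change");
	return 0;
}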
-static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q,
+static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 pid, u32 seq, unsigned flags, int event)
{
struct tcmsg *tcm;
@@ -521,9 +660,9 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q,
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
- tcm->tcm_parent = q->classid;
+ tcm->tcm_parent = clid;
tcm->tcm_handle = q->handle;
- tcm->tcm_info = 0;
+ tcm->tcm_info = atomic_read(&q->refcnt);
RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto rtattr_failure;
@@ -539,7 +678,7 @@ rtattr_failure:
}
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
- struct Qdisc *old, struct Qdisc *new)
+ u32 clid, struct Qdisc *old, struct Qdisc *new)
{
struct sk_buff *skb;
u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -548,12 +687,12 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
if (!skb)
return -ENOBUFS;
- if (old && !(old->flags&TCQ_F_DEFAULT)) {
- if (tc_fill_qdisc(skb, old, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
+ if (old && old->handle) {
+ if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
goto err_out;
}
if (new) {
- if (tc_fill_qdisc(skb, new, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+ if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
goto err_out;
}
@@ -583,7 +722,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
q = q->next, q_idx++) {
if (q_idx < s_q_idx)
continue;
- if (tc_fill_qdisc(skb, q, NETLINK_CB(cb->skb).pid,
+ if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
goto done;
}
@@ -797,11 +936,10 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
for (q=dev->qdisc_list, t=0; q; q = q->next, t++) {
if (t < s_t) continue;
if (!q->ops->cl_ops) continue;
- if (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle
- && (tcm->tcm_parent != TC_H_ROOT || q->parent != NULL))
+ if (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle)
continue;
if (t > s_t)
- memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(int));
+ memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
arg.w.fn = qdisc_class_dump;
arg.skb = skb;
arg.cb = cb;
@@ -846,6 +984,20 @@ static int psched_read_proc(char *buffer, char **start, off_t offset,
}
#endif
+#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
+int psched_tod_diff(int delta_sec, int bound)
+{
+ int delta;
+
+ if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1)
+ return bound;
+ delta = delta_sec * 1000000;
+ if (delta > bound)
+ delta = bound;
+ return delta;
+}
+#endif
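
Editor's note: the new psched_tod_diff() helper converts a gettimeofday() seconds delta to a bounded microseconds value while dodging 32-bit overflow (0x7FFFFFFF/1000000 is about 2147 seconds). A stand-alone copy with a few sample inputs, not part of the patch:

#include <stdio.h>

static int toy_tod_diff(int delta_sec, int bound)
{
	int delta;

	if (bound <= 1000000 || delta_sec > (0x7FFFFFFF / 1000000) - 1)
		return bound;
	delta = delta_sec * 1000000;
	if (delta > bound)
		delta = bound;
	return delta;
}

int main(void)
{
	printf("%d\n", toy_tod_diff(1, 5000000));     /* 1000000 */
	printf("%d\n", toy_tod_diff(10, 5000000));    /* 5000000, clamped to bound */
	printf("%d\n", toy_tod_diff(5000, 5000000));  /* 5000000, overflow guard */
	return 0;
}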
+
psched_time_t psched_time_base;
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
@@ -866,7 +1018,8 @@ static void psched_tick(unsigned long dummy)
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
psched_time_t dummy_stamp;
PSCHED_GET_TIME(dummy_stamp);
- psched_timer.expires = jiffies + 4*HZ;
+ /* This is OK for CPUs up to 4 GHz */
+ psched_timer.expires = jiffies + 1*HZ;
#else
unsigned long now = jiffies;
psched_time_base = ((u64)now)<<PSCHED_JSCALE;
@@ -891,7 +1044,6 @@ __initfunc(int psched_calibrate_clock(void))
return -1;
#endif
- start_bh_atomic();
#ifdef PSCHED_WATCHER
psched_tick(0);
#endif
@@ -902,7 +1054,6 @@ __initfunc(int psched_calibrate_clock(void))
barrier();
PSCHED_GET_TIME(stamp1);
do_gettimeofday(&tv1);
- end_bh_atomic();
delay = PSCHED_TDIFF(stamp1, stamp);
rdelay = tv1.tv_usec - tv.tv_usec;
@@ -921,6 +1072,9 @@ __initfunc(int psched_calibrate_clock(void))
__initfunc(int pktsched_init(void))
{
+#ifdef CONFIG_RTNETLINK
+ struct rtnetlink_link *link_p;
+#endif
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *ent;
#endif
@@ -931,19 +1085,22 @@ __initfunc(int pktsched_init(void))
#elif PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
psched_tick_per_us = HZ<<PSCHED_JSCALE;
psched_us_per_tick = 1000000;
+#ifdef PSCHED_WATCHER
+ psched_tick(0);
+#endif
#endif
#ifdef CONFIG_RTNETLINK
- struct rtnetlink_link *link_p = rtnetlink_links[PF_UNSPEC];
+ link_p = rtnetlink_links[PF_UNSPEC];
/* Setup rtnetlink links. It is made here to avoid
exporting large number of public symbols.
*/
if (link_p) {
- link_p[RTM_NEWQDISC-RTM_BASE].doit = tc_ctl_qdisc;
- link_p[RTM_DELQDISC-RTM_BASE].doit = tc_ctl_qdisc;
- link_p[RTM_GETQDISC-RTM_BASE].doit = tc_ctl_qdisc;
+ link_p[RTM_NEWQDISC-RTM_BASE].doit = tc_modify_qdisc;
+ link_p[RTM_DELQDISC-RTM_BASE].doit = tc_get_qdisc;
+ link_p[RTM_GETQDISC-RTM_BASE].doit = tc_get_qdisc;
link_p[RTM_GETQDISC-RTM_BASE].dumpit = tc_dump_qdisc;
link_p[RTM_NEWTCLASS-RTM_BASE].doit = tc_ctl_tclass;
link_p[RTM_DELTCLASS-RTM_BASE].doit = tc_ctl_tclass;
@@ -975,6 +1132,12 @@ __initfunc(int pktsched_init(void))
#ifdef CONFIG_NET_SCH_RED
INIT_QDISC(red);
#endif
+#ifdef CONFIG_NET_SCH_GRED
+ INIT_QDISC(gred);
+#endif
+#ifdef CONFIG_NET_SCH_DSMARK
+ INIT_QDISC(dsmark);
+#endif
#ifdef CONFIG_NET_SCH_SFQ
INIT_QDISC(sfq);
#endif
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 9ae14c243..c8094a882 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -30,13 +30,13 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
-#include <linux/module.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
+
/* Class-Based Queueing (CBQ) algorithm.
=======================================
@@ -169,6 +169,9 @@ struct cbq_sched_data
struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes
with backlog */
+#ifdef CONFIG_NET_CLS_POLICE
+ struct cbq_class *rx_class;
+#endif
struct cbq_class *tx_class;
struct cbq_class *tx_borrowed;
int tx_len;
@@ -269,17 +272,21 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch)
else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
cl = defmap[TC_PRIO_BESTEFFORT];
- if (cl == NULL)
+ if (cl == NULL || cl->level >= head->level)
goto fallback;
}
- if (cl->level == 0) {
#ifdef CONFIG_NET_CLS_POLICE
- if (result)
- return cbq_reclassify(skb, cl);
+ switch (result) {
+ case TC_POLICE_RECLASSIFY:
+ return cbq_reclassify(skb, cl);
+ case TC_POLICE_SHOT:
+ return NULL;
+ default:
+ }
#endif
+ if (cl->level == 0)
return cl;
- }
/*
* Step 3+n. If classifier selected a link sharing class,
@@ -321,11 +328,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
if (cl_tail != NULL) {
cl->next_alive = cl_tail->next_alive;
cl_tail->next_alive = cl;
- cl->deficit = 0;
} else {
cl->next_alive = cl;
q->activemask |= (1<<prio);
- cl->deficit = cl->quantum;
}
}
@@ -358,31 +363,28 @@ static void cbq_deactivate_class(struct cbq_class *this)
}
cl = cl_prev->next_alive;
- cl->deficit += cl->quantum;
return;
}
} while ((cl_prev = cl) != q->active[prio]);
}
-static __inline__ void
+static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
- if (q->toplevel > 0) {
+ int toplevel = q->toplevel;
+
+ if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
psched_time_t now;
PSCHED_GET_TIME(now);
if (PSCHED_TLESS(now, q->now))
now = q->now;
- if (PSCHED_TLESS(cl->undertime, now)) {
- q->toplevel = 0;
- return;
- }
- while ((cl = cl->borrow) != NULL
- && q->toplevel > cl->level) {
- if (PSCHED_TLESS(cl->borrow->undertime, now)) {
+
+ do {
+ if (PSCHED_TLESS(cl->undertime, now)) {
q->toplevel = cl->level;
return;
}
- }
+ } while ((cl=cl->borrow) != NULL && toplevel > cl->level);
}
}
@@ -393,23 +395,31 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct cbq_class *cl = cbq_classify(skb, sch);
int len = skb->len;
- if (cl && cl->q->enqueue(skb, cl->q) == 1) {
- sch->q.qlen++;
- sch->stats.packets++;
- cl->stats.packets++;
- sch->stats.bytes+=len;
- cl->stats.bytes+=len;
- cbq_mark_toplevel(q, cl);
- if (!cl->next_alive)
- cbq_activate_class(cl);
- return 1;
+#ifdef CONFIG_NET_CLS_POLICE
+ q->rx_class = cl;
+#endif
+ if (cl) {
+#ifdef CONFIG_NET_CLS_POLICE
+ cl->q->__parent = sch;
+#endif
+ if (cl->q->enqueue(skb, cl->q) == 1) {
+ sch->q.qlen++;
+ sch->stats.packets++;
+ sch->stats.bytes+=len;
+ cbq_mark_toplevel(q, cl);
+ if (!cl->next_alive)
+ cbq_activate_class(cl);
+ return 1;
+ }
}
sch->stats.drops++;
if (cl == NULL)
kfree_skb(skb);
- else
+ else {
+ cbq_mark_toplevel(q, cl);
cl->stats.drops++;
+ }
return 0;
}
@@ -426,9 +436,14 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
}
q->tx_class = NULL;
+ cbq_mark_toplevel(q, cl);
+
+#ifdef CONFIG_NET_CLS_POLICE
+ q->rx_class = cl;
+ cl->q->__parent = sch;
+#endif
if (cl->q->ops->requeue(skb, cl->q) == 1) {
sch->q.qlen++;
- cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
return 1;
@@ -445,11 +460,9 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
static void cbq_ovl_classic(struct cbq_class *cl)
{
struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
- psched_tdiff_t delay;
-
- delay = PSCHED_TDIFF(cl->undertime, q->now);
delay += cl->offtime;
/*
@@ -463,15 +476,35 @@ static void cbq_ovl_classic(struct cbq_class *cl)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
if (cl->avgidle < cl->minidle)
cl->avgidle = cl->minidle;
- if (delay < 0)
- delay = 0;
+ if (delay <= 0)
+ delay = 1;
PSCHED_TADD2(q->now, delay, cl->undertime);
- if (q->wd_expires == 0 || q->wd_expires > delay)
- q->wd_expires = delay;
cl->xstats.overactions++;
cl->delayed = 1;
}
+ if (q->wd_expires == 0 || q->wd_expires > delay)
+ q->wd_expires = delay;
+
+ /* Dirty work! We must schedule wakeups based on the
+ real available rate, rather than the leaf rate,
+ which may be tiny (even zero).
+ */
+ if (q->toplevel == TC_CBQ_MAXLEVEL) {
+ struct cbq_class *b;
+ psched_tdiff_t base_delay = q->wd_expires;
+
+ for (b = cl->borrow; b; b = b->borrow) {
+ delay = PSCHED_TDIFF(b->undertime, q->now);
+ if (delay < base_delay) {
+ if (delay <= 0)
+ delay = 1;
+ base_delay = delay;
+ }
+ }
+
+ q->wd_expires = delay;
+ }
}
/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
@@ -481,15 +514,18 @@ static void cbq_ovl_classic(struct cbq_class *cl)
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ struct cbq_class *this = cl;
- while (cl && cl->delayed) {
- cl = cl->borrow;
- if (cl->level > q->toplevel)
- return;
- }
+ do {
+ if (cl->level > q->toplevel) {
+ cl = NULL;
+ break;
+ }
+ } while ((cl = cl->borrow) != NULL);
- if (cl)
- cbq_ovl_classic(cl);
+ if (cl == NULL)
+ cl = this;
+ cbq_ovl_classic(cl);
}
/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */
@@ -497,12 +533,11 @@ static void cbq_ovl_rclassic(struct cbq_class *cl)
static void cbq_ovl_delay(struct cbq_class *cl)
{
struct cbq_sched_data *q = (struct cbq_sched_data *)cl->qdisc->data;
+ psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
- psched_tdiff_t delay;
unsigned long sched = jiffies;
- delay = PSCHED_TDIFF(cl->undertime, q->now);
delay += cl->offtime;
if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -521,8 +556,12 @@ static void cbq_ovl_delay(struct cbq_class *cl)
add_timer(&q->delay_timer);
cl->delayed = 1;
cl->xstats.overactions++;
+ return;
}
+ delay = 1;
}
+ if (q->wd_expires == 0 || q->wd_expires > delay)
+ q->wd_expires = delay;
}
/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
@@ -555,6 +594,7 @@ static void cbq_ovl_drop(struct cbq_class *cl)
static void cbq_watchdog(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
+ sch->flags &= ~TCQ_F_THROTTLED;
qdisc_wakeup(sch->dev);
}
@@ -622,6 +662,7 @@ static void cbq_undelay(unsigned long arg)
add_timer(&q->delay_timer);
}
+ sch->flags &= ~TCQ_F_THROTTLED;
qdisc_wakeup(sch->dev);
}
@@ -631,18 +672,23 @@ static void cbq_undelay(unsigned long arg)
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
int len = skb->len;
- struct Qdisc *sch = child->parent;
+ struct Qdisc *sch = child->__parent;
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
- struct cbq_class *cl = cbq_class_lookup(q, child->classid);
+ struct cbq_class *cl = q->rx_class;
+
+ q->rx_class = NULL;
if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+
+ cbq_mark_toplevel(q, cl);
+
+ q->rx_class = cl;
+ cl->q->__parent = sch;
+
if (cl->q->enqueue(skb, cl->q) == 1) {
sch->q.qlen++;
sch->stats.packets++;
- cl->stats.packets++;
sch->stats.bytes+=len;
- cl->stats.bytes+=len;
- cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
@@ -656,21 +702,42 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
}
#endif
+/*
+ This is a mission-critical procedure.
+
+ We "regenerate" the toplevel cutoff if the transmitting class
+ has backlog and is not regulated. This is not part of the
+ original CBQ description, but it looks more reasonable.
+ It may still be wrong; the question needs further investigation.
+*/
+
static __inline__ void
-cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
+cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
+ struct cbq_class *borrowed)
{
- if (cl && q->toplevel >= cl->level) {
- if (cl->q->q.qlen <= 1 || PSCHED_TLESS(q->now, cl->undertime))
- q->toplevel = TC_CBQ_MAXLEVEL;
- else /* BUGGGG? if (cl != this) */
- q->toplevel = cl->level;
+ if (cl && q->toplevel >= borrowed->level) {
+ if (cl->q->q.qlen > 1) {
+ do {
+ if (PSCHED_IS_PASTPERFECT(borrowed->undertime)) {
+ q->toplevel = borrowed->level;
+ return;
+ }
+ } while ((borrowed=borrowed->borrow) != NULL);
+ }
+#if 0
+ /* It is not necessary now. Uncommenting it
+ will save CPU cycles, but decrease fairness.
+ */
+ q->toplevel = TC_CBQ_MAXLEVEL;
+#endif
}
}
-static __inline__ void
+static void
cbq_update(struct cbq_sched_data *q)
{
- struct cbq_class *cl = q->tx_class;
+ struct cbq_class *this = q->tx_class;
+ struct cbq_class *cl = this;
int len = q->tx_len;
q->tx_class = NULL;
@@ -679,6 +746,9 @@ cbq_update(struct cbq_sched_data *q)
long avgidle = cl->avgidle;
long idle;
+ cl->stats.packets++;
+ cl->stats.bytes += len;
+
/*
(now - last) is total time between packet right edges.
(last_pktlen/rate) is "virtual" busy time, so that
@@ -697,6 +767,10 @@ cbq_update(struct cbq_sched_data *q)
if (avgidle <= 0) {
/* Overlimit or at-limit */
+
+ if (avgidle < cl->minidle)
+ avgidle = cl->minidle;
+
cl->avgidle = avgidle;
/* Calculate expected time, when this class
@@ -732,12 +806,11 @@ cbq_update(struct cbq_sched_data *q)
cl->avgidle = cl->maxidle;
else
cl->avgidle = avgidle;
-
}
cl->last = q->now;
}
- cbq_update_toplevel(q, q->tx_borrowed);
+ cbq_update_toplevel(q, this, q->tx_borrowed);
}
static __inline__ struct cbq_class *
@@ -750,21 +823,33 @@ cbq_under_limit(struct cbq_class *cl)
return cl;
if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
- PSCHED_TLESS(cl->undertime, q->now)) {
+ !PSCHED_TLESS(q->now, cl->undertime)) {
cl->delayed = 0;
return cl;
}
- while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
- PSCHED_TLESS(q->now, cl->undertime)) {
- if ((cl = cl->borrow) == NULL || cl->level > q->toplevel) {
+ do {
+ /* This is a very suspicious place. Currently the overlimit
+ action is generated for non-bounded classes
+ only if the link is completely congested.
+ Though this agrees with the ancestor-only paradigm,
+ it looks very stupid. In particular,
+ it means that this chunk of code will either
+ never be called or result in strong amplification
+ of burstiness. Dangerous and silly, but
+ no other solution exists.
+ */
+ if ((cl = cl->borrow) == NULL) {
this_cl->stats.overlimits++;
this_cl->overlimit(this_cl);
return NULL;
}
- }
- this_cl->xstats.borrows++;
- cl->xstats.borrows++;
+ if (cl->level > q->toplevel)
+ return NULL;
+ } while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
+ PSCHED_TLESS(q->now, cl->undertime));
+
+ cl->delayed = 0;
return cl;
}
@@ -784,27 +869,26 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
/* Start round */
do {
- struct cbq_class *borrow;
+ struct cbq_class *borrow = NULL;
- /* Class is empty */
- if (cl->q->q.qlen == 0)
- goto skip_class;
-
- if ((borrow = cbq_under_limit(cl)) == NULL)
+ if (cl->q->q.qlen &&
+ (borrow = cbq_under_limit(cl)) == NULL)
goto skip_class;
if (cl->deficit <= 0) {
- /* Class exhausted its allotment per this
- round.
+ /* Class exhausted its allotment per
+ this round. Switch to the next one.
*/
deficit = 1;
+ cl->deficit += cl->quantum;
goto next_class;
}
skb = cl->q->dequeue(cl->q);
/* Class did not give us any skb :-(
- It could occur if cl->q == "tbf"
+ It could occur even if cl->q->q.qlen != 0
+ e.g. if cl->q == "tbf"
*/
if (skb == NULL)
goto skip_class;
@@ -812,6 +896,15 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
cl->deficit -= skb->len;
q->tx_class = cl;
q->tx_borrowed = borrow;
+ if (borrow != cl) {
+#ifndef CBQ_XSTATS_BORROWS_BYTES
+ borrow->xstats.borrows++;
+ cl->xstats.borrows++;
+#else
+ borrow->xstats.borrows += skb->len;
+ cl->xstats.borrows += skb->len;
+#endif
+ }
q->tx_len = skb->len;
if (cl->deficit <= 0) {
@@ -822,8 +915,6 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
return skb;
skip_class:
- cl->deficit = 0;
-
if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
/* Class is empty or penalized.
Unlink it from active chain.
@@ -857,7 +948,6 @@ skip_class:
next_class:
cl_prev = cl;
cl = cl->next_alive;
- cl->deficit += cl->quantum;
} while (cl_prev != cl_tail);
} while (deficit);
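
Editor's note: the deficit handling above (top up by cl->quantum only when the allotment is exhausted, then move on) is what shares a priority band's bandwidth in proportion to the quanta. A stand-alone sketch of that long-run behaviour, not part of the patch, with the per-packet bookkeeping collapsed into one pass per round:

#include <stdio.h>

struct toy_class {
	const char *name;
	int quantum;
	long deficit;
	long sent;
};

int main(void)
{
	struct toy_class cls[2] = {
		{ "A", 1000, 1000, 0 },
		{ "B", 3000, 3000, 0 },
	};
	int pktlen = 500, i, round;

	for (round = 0; round < 1000; round++) {
		for (i = 0; i < 2; i++) {
			struct toy_class *cl = &cls[i];

			/* Send while the class still has deficit ... */
			while (cl->deficit > 0) {
				cl->deficit -= pktlen;
				cl->sent += pktlen;
			}
			/* ... then top up its allotment for the next round. */
			cl->deficit += cl->quantum;
		}
	}
	printf("A sent %ld bytes, B sent %ld bytes\n", cls[0].sent, cls[1].sent);
	return 0;
}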
@@ -914,6 +1004,7 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch);
if (skb) {
sch->q.qlen--;
+ sch->flags &= ~TCQ_F_THROTTLED;
return skb;
}
@@ -955,6 +1046,7 @@ cbq_dequeue(struct Qdisc *sch)
delay = 1;
q->wd_timer.expires = jiffies + delay;
add_timer(&q->wd_timer);
+ sch->flags |= TCQ_F_THROTTLED;
}
}
return NULL;
@@ -1129,14 +1221,18 @@ static void cbq_link_class(struct cbq_class *this)
static int cbq_drop(struct Qdisc* sch)
{
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
- struct cbq_class *cl;
- int h;
+ struct cbq_class *cl, *cl_head;
+ int prio;
- for (h = TC_CBQ_MAXPRIO; h >= 0; h++) {
- for (cl = q->classes[h]; cl; cl = cl->next) {
+ for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio++) {
+ if ((cl_head = q->active[prio]) == NULL)
+ continue;
+
+ cl = cl_head;
+ do {
if (cl->q->ops->drop && cl->q->ops->drop(cl->q))
return 1;
- }
+ } while ((cl = cl->next_alive) != cl_head);
}
return 0;
}
@@ -1166,8 +1262,8 @@ cbq_reset(struct Qdisc* sch)
cl->next_alive = NULL;
PSCHED_SET_PASTPERFECT(cl->undertime);
- cl->avgidle = 0;
- cl->deficit = 0;
+ cl->avgidle = cl->maxidle;
+ cl->deficit = cl->quantum;
cl->cpriority = cl->priority;
}
}
@@ -1187,8 +1283,10 @@ static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
cl->avpkt = lss->avpkt;
if (lss->change&TCF_CBQ_LSS_MINIDLE)
cl->minidle = -(long)lss->minidle;
- if (lss->change&TCF_CBQ_LSS_MAXIDLE)
+ if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
cl->maxidle = lss->maxidle;
+ cl->avgidle = lss->maxidle;
+ }
if (lss->change&TCF_CBQ_LSS_OFFTIME)
cl->offtime = lss->offtime;
return 0;
@@ -1261,7 +1359,7 @@ static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
cl->police = p->police;
- if (!(cl->q->flags&TCQ_F_DEFAULT)) {
+ if (cl->q->handle) {
if (p->police == TC_POLICE_RECLASSIFY)
cl->q->reshape_fail = cbq_reshape_fail;
else
@@ -1300,6 +1398,7 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
return -EINVAL;
}
+ q->link.refcnt = 1;
q->link.sibling = &q->link;
q->link.classid = sch->handle;
q->link.qdisc = sch;
@@ -1493,6 +1592,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
else
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle = cl->classid;
+ tcm->tcm_info = cl->q->handle;
rta = (struct rtattr*)b;
RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
@@ -1533,12 +1633,20 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
}
if ((*old = xchg(&cl->q, new)) != NULL)
qdisc_reset(*old);
-
+
return 0;
}
return -ENOENT;
}
+static struct Qdisc *
+cbq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct cbq_class *cl = (struct cbq_class*)arg;
+
+ return cl ? cl->q : NULL;
+}
+
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
@@ -1569,6 +1677,7 @@ static void cbq_destroy_class(struct cbq_class *cl)
#ifdef CONFIG_NET_ESTIMATOR
qdisc_kill_estimator(&cl->stats);
#endif
+ kfree(cl);
}
static void
@@ -1578,6 +1687,9 @@ cbq_destroy(struct Qdisc* sch)
struct cbq_class *cl;
unsigned h;
+#ifdef CONFIG_NET_CLS_POLICE
+ q->rx_class = NULL;
+#endif
for (h = 0; h < 16; h++) {
for (cl = q->classes[h]; cl; cl = cl->next)
cbq_destroy_filters(cl);
@@ -1590,20 +1702,29 @@ cbq_destroy(struct Qdisc* sch)
}
qdisc_put_rtab(q->link.R_tab);
+ MOD_DEC_USE_COUNT;
}
-static void cbq_put(struct Qdisc *q, unsigned long arg)
+static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
struct cbq_class *cl = (struct cbq_class*)arg;
- if (--cl->refcnt == 0)
+ start_bh_atomic();
+ if (--cl->refcnt == 0) {
+#ifdef CONFIG_NET_CLS_POLICE
+ if (q->rx_class == cl)
+ q->rx_class = NULL;
+#endif
cbq_destroy_class(cl);
+ }
+ end_bh_atomic();
return;
}
static int
-cbq_change(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
- unsigned long *arg)
+cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
+ unsigned long *arg)
{
int err;
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
@@ -1763,6 +1884,7 @@ cbq_change(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
cl->borrow = cl->tparent;
if (cl->tparent != &q->link)
cl->share = cl->tparent;
+ cbq_adjust_levels(parent);
cl->minidle = -0x7FFFFFFF;
cbq_set_lss(cl, RTA_DATA(tb[TCA_CBQ_LSSOPT-1]));
cbq_set_wrr(cl, RTA_DATA(tb[TCA_CBQ_WRROPT-1]));
@@ -1781,7 +1903,6 @@ cbq_change(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
#endif
if (tb[TCA_CBQ_FOPT-1])
cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
- cbq_adjust_levels(parent);
end_bh_atomic();
#ifdef CONFIG_NET_ESTIMATOR
@@ -1810,10 +1931,16 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
if (cl->next_alive)
cbq_deactivate_class(cl);
- if (q->tx_class == cl)
- q->tx_class = cl->borrow;
if (q->tx_borrowed == cl)
q->tx_borrowed = q->tx_class;
+ if (q->tx_class == cl) {
+ q->tx_class = NULL;
+ q->tx_borrowed = NULL;
+ }
+#ifdef CONFIG_NET_CLS_POLICE
+ if (q->rx_class == cl)
+ q->rx_class = NULL;
+#endif
cbq_unlink_class(cl);
cbq_adjust_levels(cl->tparent);
@@ -1841,12 +1968,16 @@ static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
return &cl->filter_list;
}
-static unsigned long cbq_bind_filter(struct Qdisc *sch, u32 classid)
+static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
{
struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
+ struct cbq_class *p = (struct cbq_class*)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
+ if (p && p->level <= cl->level)
+ return 0;
cl->filters++;
return (unsigned long)cl;
}
@@ -1878,7 +2009,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
arg->stop = 1;
- break;
+ return;
}
arg->count++;
}
@@ -1888,9 +2019,10 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
static struct Qdisc_class_ops cbq_class_ops =
{
cbq_graft,
+ cbq_leaf,
cbq_get,
cbq_put,
- cbq_change,
+ cbq_change_class,
cbq_delete,
cbq_walk,
@@ -1918,6 +2050,7 @@ struct Qdisc_ops cbq_qdisc_ops =
cbq_init,
cbq_reset,
cbq_destroy,
+ NULL /* cbq_change */,
#ifdef CONFIG_RTNETLINK
cbq_dump,
diff --git a/net/sched/sch_csz.c b/net/sched/sch_csz.c
index 9bdc656c9..2202fd81a 100644
--- a/net/sched/sch_csz.c
+++ b/net/sched/sch_csz.c
@@ -826,6 +826,12 @@ static int csz_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
return -EINVAL;
}
+static struct Qdisc * csz_leaf(struct Qdisc *sch, unsigned long cl)
+{
+ return NULL;
+}
+
+
static unsigned long csz_get(struct Qdisc *sch, u32 classid)
{
struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
@@ -840,6 +846,12 @@ static unsigned long csz_get(struct Qdisc *sch, u32 classid)
return band+1;
}
+static unsigned long csz_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
+{
+ return csz_get(sch, classid);
+}
+
+
static void csz_put(struct Qdisc *sch, unsigned long cl)
{
return;
@@ -1006,6 +1018,8 @@ static struct tcf_proto ** csz_find_tcf(struct Qdisc *sch, unsigned long cl)
struct Qdisc_class_ops csz_class_ops =
{
csz_graft,
+ csz_leaf,
+
csz_get,
csz_put,
csz_change,
@@ -1013,7 +1027,7 @@ struct Qdisc_class_ops csz_class_ops =
csz_walk,
csz_find_tcf,
- csz_get,
+ csz_bind,
csz_put,
#ifdef CONFIG_RTNETLINK
@@ -1036,6 +1050,7 @@ struct Qdisc_ops csz_qdisc_ops =
csz_init,
csz_reset,
csz_destroy,
+ NULL /* csz_change */,
#ifdef CONFIG_RTNETLINK
csz_dump,
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 14bc8bb8b..c93f206a2 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -97,10 +97,7 @@ fifo_drop(struct Qdisc* sch)
static void
fifo_reset(struct Qdisc* sch)
{
- struct sk_buff *skb;
-
- while ((skb=__skb_dequeue(&sch->q)) != NULL)
- kfree_skb(skb);
+ skb_queue_purge(&sch->q);
sch->stats.backlog = 0;
}
@@ -137,15 +134,15 @@ pfifo_dequeue(struct Qdisc* sch)
return __skb_dequeue(&sch->q);
}
-
static int fifo_init(struct Qdisc *sch, struct rtattr *opt)
{
struct fifo_sched_data *q = (void*)sch->data;
if (opt == NULL) {
- q->limit = sch->dev->tx_queue_len;
if (sch->ops == &bfifo_qdisc_ops)
- q->limit *= sch->dev->mtu;
+ q->limit = sch->dev->tx_queue_len*sch->dev->mtu;
+ else
+ q->limit = sch->dev->tx_queue_len;
} else {
struct tc_fifo_qopt *ctl = RTA_DATA(opt);
if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
@@ -188,6 +185,8 @@ struct Qdisc_ops pfifo_qdisc_ops =
fifo_init,
fifo_reset,
NULL,
+ fifo_init,
+
#ifdef CONFIG_RTNETLINK
fifo_dump,
#endif
@@ -208,6 +207,7 @@ struct Qdisc_ops bfifo_qdisc_ops =
fifo_init,
fifo_reset,
NULL,
+ fifo_init,
#ifdef CONFIG_RTNETLINK
fifo_dump,
#endif
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7ba2e94cc..ba40033e5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -184,7 +184,7 @@ struct Qdisc noop_qdisc =
{ NULL },
noop_enqueue,
noop_dequeue,
- TCQ_F_DEFAULT|TCQ_F_BUILTIN,
+ TCQ_F_BUILTIN,
&noop_qdisc_ops,
};
@@ -207,7 +207,7 @@ struct Qdisc noqueue_qdisc =
{ NULL },
NULL,
NULL,
- TCQ_F_DEFAULT|TCQ_F_BUILTIN,
+ TCQ_F_BUILTIN,
&noqueue_qdisc_ops,
};
@@ -322,8 +322,8 @@ struct Qdisc * qdisc_create_dflt(struct device *dev, struct Qdisc_ops *ops)
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev = dev;
- sch->flags |= TCQ_F_DEFAULT;
- if (ops->init && ops->init(sch, NULL) == 0)
+ atomic_set(&sch->refcnt, 1);
+ if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
kfree(sch);
@@ -342,6 +342,10 @@ void qdisc_reset(struct Qdisc *qdisc)
void qdisc_destroy(struct Qdisc *qdisc)
{
struct Qdisc_ops *ops = qdisc->ops;
+
+ if (!atomic_dec_and_test(&qdisc->refcnt))
+ return;
+
#ifdef CONFIG_NET_SCHED
if (qdisc->dev) {
struct Qdisc *q, **qp;
@@ -444,30 +448,3 @@ void dev_shutdown(struct device *dev)
end_bh_atomic();
}
-struct Qdisc * dev_set_scheduler(struct device *dev, struct Qdisc *qdisc)
-{
- struct Qdisc *oqdisc;
-
- if (dev->flags & IFF_UP)
- dev_deactivate(dev);
-
- start_bh_atomic();
- oqdisc = dev->qdisc_sleeping;
-
- /* Prune old scheduler */
- if (oqdisc)
- qdisc_reset(oqdisc);
-
- /* ... and graft new one */
- if (qdisc == NULL)
- qdisc = &noop_qdisc;
- dev->qdisc_sleeping = qdisc;
- dev->qdisc = &noop_qdisc;
- end_bh_atomic();
-
- if (dev->flags & IFF_UP)
- dev_activate(dev);
-
- return oqdisc;
-}
-
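Aside (illustrative, not part of the patch): the qdisc_create_dflt()/qdisc_destroy() hunks above switch root qdiscs to plain reference counting, with creation taking the first reference and qdisc_destroy() only freeing once the last holder drops it. A minimal user-space sketch of the same pattern, using invented names and a plain int instead of the kernel's atomic_t:

#include <stdio.h>
#include <stdlib.h>

struct qd {                          /* stand-in for struct Qdisc */
	int refcnt;
	const char *name;
};

static struct qd *qd_create(const char *name)
{
	struct qd *q = malloc(sizeof(*q));
	q->refcnt = 1;               /* creator holds the first reference */
	q->name = name;
	return q;
}

static void qd_hold(struct qd *q)
{
	q->refcnt++;                 /* e.g. a class grafting it as its leaf */
}

static void qd_destroy(struct qd *q)
{
	if (--q->refcnt)             /* still referenced elsewhere: do nothing */
		return;
	printf("freeing %s\n", q->name);
	free(q);
}

int main(void)
{
	struct qd *q = qd_create("root");
	qd_hold(q);
	qd_destroy(q);               /* first drop: survives */
	qd_destroy(q);               /* last drop: actually freed */
	return 0;
}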
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 5b7b39fea..5222d149d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -49,17 +49,19 @@ static __inline__ unsigned prio_classify(struct sk_buff *skb, struct Qdisc *sch)
{
struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
struct tcf_result res;
+ u32 band;
- res.classid = skb->priority;
- if (TC_H_MAJ(res.classid) != sch->handle) {
+ band = skb->priority;
+ if (TC_H_MAJ(skb->priority) != sch->handle) {
if (!q->filter_list || tc_classify(skb, q->filter_list, &res)) {
- if (TC_H_MAJ(res.classid))
- res.classid = 0;
- res.classid = q->prio2band[res.classid&TC_PRIO_MAX] + 1;
+ if (TC_H_MAJ(band))
+ band = 0;
+ return q->prio2band[band&TC_PRIO_MAX];
}
+ band = res.classid;
}
-
- return res.classid - 1;
+ band = TC_H_MIN(band) - 1;
+ return band < q->bands ? band : q->prio2band[0];
}
static int
@@ -160,38 +162,74 @@ prio_destroy(struct Qdisc* sch)
MOD_DEC_USE_COUNT;
}
+static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ struct tc_prio_qopt *qopt = RTA_DATA(opt);
+ int i;
+
+ if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
+ return -EINVAL;
+ if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
+ return -EINVAL;
+
+ for (i=0; i<=TC_PRIO_MAX; i++) {
+ if (qopt->priomap[i] >= qopt->bands)
+ return -EINVAL;
+ }
+
+ start_bh_atomic();
+ q->bands = qopt->bands;
+ memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
+
+ for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
+ struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
+ if (child != &noop_qdisc)
+ qdisc_destroy(child);
+ }
+ end_bh_atomic();
+
+ for (i=0; i<=TC_PRIO_MAX; i++) {
+ int band = q->prio2band[i];
+ if (q->queues[band] == &noop_qdisc) {
+ struct Qdisc *child;
+ child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ if (child) {
+ child = xchg(&q->queues[band], child);
+ synchronize_bh();
+
+ if (child != &noop_qdisc)
+ qdisc_destroy(child);
+ }
+ }
+ }
+ return 0;
+}
+
static int prio_init(struct Qdisc *sch, struct rtattr *opt)
{
static const u8 prio2band[TC_PRIO_MAX+1] =
{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
- unsigned mask = 0;
int i;
+ for (i=0; i<TCQ_PRIO_BANDS; i++)
+ q->queues[i] = &noop_qdisc;
+
if (opt == NULL) {
q->bands = 3;
memcpy(q->prio2band, prio2band, sizeof(prio2band));
- mask = 7;
+ for (i=0; i<3; i++) {
+ struct Qdisc *child;
+ child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ if (child)
+ q->queues[i] = child;
+ }
} else {
- struct tc_prio_qopt *qopt = RTA_DATA(opt);
+ int err;
- if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
- return -EINVAL;
- if (qopt->bands > TCQ_PRIO_BANDS)
- return -EINVAL;
- q->bands = qopt->bands;
- for (i=0; i<=TC_PRIO_MAX; i++) {
- if (qopt->priomap[i] >= q->bands)
- return -EINVAL;
- q->prio2band[i] = qopt->priomap[i];
- mask |= (1<<qopt->priomap[i]);
- }
- }
- for (i=0; i<TCQ_PRIO_BANDS; i++) {
- if (mask&(1<<i))
- q->queues[i] = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
- if (q->queues[i] == NULL)
- q->queues[i] = &noop_qdisc;
+ if ((err= prio_tune(sch, opt)) != 0)
+ return err;
}
MOD_INC_USE_COUNT;
return 0;
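Aside (illustrative, not part of the patch): the default priomap installed by prio_init() above maps the 16 possible TC_PRIO values onto three bands, with band 0 dequeued first; priorities 6 and 7 land in band 0. A tiny user-space printout of that table (the array is copied from the hunk, the rest is scaffolding):

#include <stdio.h>

int main(void)
{
	/* default prio2band table from prio_init() above */
	static const unsigned char prio2band[16] =
		{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
	int prio;

	for (prio = 0; prio < 16; prio++)
		printf("skb->priority %2d -> band %d\n",
		       prio, prio2band[prio & 15]);
	return 0;
}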
@@ -232,6 +270,18 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return 0;
}
+static struct Qdisc *
+prio_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
+ unsigned long band = arg - 1;
+
+ if (band >= q->bands)
+ return NULL;
+
+ return q->queues[band];
+}
+
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
@@ -242,6 +292,12 @@ static unsigned long prio_get(struct Qdisc *sch, u32 classid)
return band;
}
+static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
+{
+ return prio_get(sch, classid);
+}
+
+
static void prio_put(struct Qdisc *q, unsigned long cl)
{
return;
@@ -267,12 +323,15 @@ static int prio_delete(struct Qdisc *sch, unsigned long cl)
#ifdef CONFIG_RTNETLINK
-static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm)
+static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
+ struct tcmsg *tcm)
{
struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
if (cl - 1 > q->bands)
return -ENOENT;
+ if (q->queues[cl-1])
+ tcm->tcm_info = q->queues[cl-1]->handle;
return 0;
}
#endif
@@ -310,6 +369,8 @@ static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
static struct Qdisc_class_ops prio_class_ops =
{
prio_graft,
+ prio_leaf,
+
prio_get,
prio_put,
prio_change,
@@ -317,7 +378,7 @@ static struct Qdisc_class_ops prio_class_ops =
prio_walk,
prio_find_tcf,
- prio_get,
+ prio_bind,
prio_put,
#ifdef CONFIG_RTNETLINK
@@ -340,6 +401,7 @@ struct Qdisc_ops prio_qdisc_ops =
prio_init,
prio_reset,
prio_destroy,
+ prio_tune,
#ifdef CONFIG_RTNETLINK
prio_dump,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index eac678b83..30b537b53 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -193,8 +193,8 @@ red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
if (q->qave < q->qth_min) {
-enqueue:
q->qcount = -1;
+enqueue:
if (sch->stats.backlog <= q->limit) {
__skb_queue_tail(&sch->q, skb);
sch->stats.backlog += skb->len;
@@ -231,6 +231,7 @@ drop:
*/
if (((q->qave - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
goto enqueue;
+printk(KERN_DEBUG "Drop %d\n", q->qcount);
q->qcount = 0;
q->qR = net_random()&q->Rmask;
sch->stats.overlimits++;
@@ -375,6 +376,7 @@ struct Qdisc_ops red_qdisc_ops =
red_init,
red_reset,
red_destroy,
+ NULL /* red_change */,
#ifdef CONFIG_RTNETLINK
red_dump,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c6f43badc..8baf254eb 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -14,7 +14,6 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -380,6 +379,27 @@ static void sfq_perturbation(unsigned long arg)
}
}
+static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
+ struct tc_sfq_qopt *ctl = RTA_DATA(opt);
+
+ if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+ return -EINVAL;
+
+ start_bh_atomic();
+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+ q->perturb_period = ctl->perturb_period*HZ;
+
+ del_timer(&q->perturb_timer);
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+ end_bh_atomic();
+ return 0;
+}
+
static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
{
struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
@@ -399,24 +419,15 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
q->max_depth = 0;
q->tail = SFQ_DEPTH;
if (opt == NULL) {
- q->quantum = sch->dev->mtu;
+ q->quantum = psched_mtu(sch->dev);
q->perturb_period = 0;
- if (sch->dev->hard_header)
- q->quantum += sch->dev->hard_header_len;
} else {
- struct tc_sfq_qopt *ctl = RTA_DATA(opt);
- if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
- return -EINVAL;
- q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
- q->perturb_period = ctl->perturb_period*HZ;
- /* The rest is compiled in */
+ int err = sfq_change(sch, opt);
+ if (err)
+ return err;
}
for (i=0; i<SFQ_DEPTH; i++)
sfq_link(q, i);
- if (q->perturb_period) {
- q->perturb_timer.expires = jiffies + q->perturb_period;
- add_timer(&q->perturb_timer);
- }
MOD_INC_USE_COUNT;
return 0;
}
@@ -467,6 +478,7 @@ struct Qdisc_ops sfq_qdisc_ops =
sfq_init,
sfq_reset,
sfq_destroy,
+ NULL, /* sfq_change */
#ifdef CONFIG_RTNETLINK
sfq_dump,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 83d6da87c..a4d13b628 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -114,6 +114,7 @@ struct tbf_sched_data
u32 limit; /* Maximal length of backlog: bytes */
u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
u32 mtu;
+ u32 max_size;
struct qdisc_rate_table *R_tab;
struct qdisc_rate_table *P_tab;
@@ -132,6 +133,8 @@ tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+ if (skb->len > q->max_size)
+ goto drop;
__skb_queue_tail(&sch->q, skb);
if ((sch->stats.backlog += skb->len) <= q->limit) {
sch->stats.bytes += skb->len;
@@ -145,6 +148,8 @@ tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
__skb_unlink(skb, &sch->q);
sch->stats.backlog -= skb->len;
+
+drop:
sch->stats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
@@ -180,6 +185,7 @@ static void tbf_watchdog(unsigned long arg)
{
struct Qdisc *sch = (struct Qdisc*)arg;
+ sch->flags &= ~TCQ_F_THROTTLED;
qdisc_wakeup(sch->dev);
}
@@ -216,6 +222,7 @@ tbf_dequeue(struct Qdisc* sch)
q->tokens = toks;
q->ptokens = ptoks;
sch->stats.backlog -= skb->len;
+ sch->flags &= ~TCQ_F_THROTTLED;
return skb;
}
@@ -238,10 +245,11 @@ tbf_dequeue(struct Qdisc* sch)
Really, if we split the flow into independent
subflows, it would be a very good solution.
This is the main idea of all FQ algorithms
- (cf. CSZ, HPFQ, HFCS)
+ (cf. CSZ, HPFQ, HFSC)
*/
__skb_queue_head(&sch->q, skb);
+ sch->flags |= TCQ_F_THROTTLED;
sch->stats.overlimits++;
}
return NULL;
@@ -258,53 +266,86 @@ tbf_reset(struct Qdisc* sch)
PSCHED_GET_TIME(q->t_c);
q->tokens = q->buffer;
q->ptokens = q->mtu;
+ sch->flags &= ~TCQ_F_THROTTLED;
del_timer(&q->wd_timer);
}
-static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
+static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
+ int err = -EINVAL;
struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
struct rtattr *tb[TCA_TBF_PTAB];
struct tc_tbf_qopt *qopt;
+ struct qdisc_rate_table *rtab = NULL;
+ struct qdisc_rate_table *ptab = NULL;
+ int max_size;
- MOD_INC_USE_COUNT;
-
- if (opt == NULL ||
- rtattr_parse(tb, TCA_TBF_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
+ if (rtattr_parse(tb, TCA_TBF_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
tb[TCA_TBF_PARMS-1] == NULL ||
- RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt)) {
- MOD_DEC_USE_COUNT;
- return -EINVAL;
- }
+ RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
+ goto done;
qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
- q->R_tab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
- if (q->R_tab == NULL) {
- MOD_DEC_USE_COUNT;
- return -EINVAL;
- }
+ rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
+ if (rtab == NULL)
+ goto done;
if (qopt->peakrate.rate) {
- q->P_tab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_PTAB-1]);
- if (q->P_tab == NULL) {
- MOD_DEC_USE_COUNT;
- qdisc_put_rtab(q->R_tab);
- return -EINVAL;
+ if (qopt->peakrate.rate > qopt->rate.rate)
+ ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
+ if (ptab == NULL)
+ goto done;
+ }
+
+ max_size = psched_mtu(sch->dev);
+ if (ptab) {
+ int n = max_size>>qopt->peakrate.cell_log;
+ while (n>0 && ptab->data[n-1] > qopt->mtu) {
+ max_size -= (1<<qopt->peakrate.cell_log);
+ n--;
}
}
+ if (rtab->data[max_size>>qopt->rate.cell_log] > qopt->buffer)
+ goto done;
- PSCHED_GET_TIME(q->t_c);
- init_timer(&q->wd_timer);
- q->wd_timer.function = tbf_watchdog;
- q->wd_timer.data = (unsigned long)sch;
+ start_bh_atomic();
q->limit = qopt->limit;
q->mtu = qopt->mtu;
- if (q->mtu == 0)
- q->mtu = psched_mtu(sch->dev);
+ q->max_size = max_size;
q->buffer = qopt->buffer;
q->tokens = q->buffer;
q->ptokens = q->mtu;
- return 0;
+ rtab = xchg(&q->R_tab, rtab);
+ ptab = xchg(&q->P_tab, ptab);
+ end_bh_atomic();
+ err = 0;
+done:
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ if (ptab)
+ qdisc_put_rtab(ptab);
+ return err;
+}
+
+static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
+{
+ int err;
+ struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
+
+ if (opt == NULL)
+ return -EINVAL;
+
+ MOD_INC_USE_COUNT;
+
+ PSCHED_GET_TIME(q->t_c);
+ init_timer(&q->wd_timer);
+ q->wd_timer.function = tbf_watchdog;
+ q->wd_timer.data = (unsigned long)sch;
+
+ if ((err = tbf_change(sch, opt)) != 0) {
+ MOD_DEC_USE_COUNT;
+ }
+ return err;
}
static void tbf_destroy(struct Qdisc *sch)
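Aside (illustrative, not part of the patch): the max_size computation in tbf_change() above enforces the classic token-bucket constraint that a packet is only acceptable if the time needed to send it at the configured rate fits into the bucket depth; anything larger could never gather enough tokens, so it is rejected at configuration time and dropped up front in tbf_enqueue(). A rough user-space illustration of the same bound, using a made-up linear rate function instead of the kernel's qdisc_rate_table:

#include <stdio.h>

/* hypothetical: ticks needed to transmit len bytes at bytes_per_tick */
static long txtime(int len, long bytes_per_tick)
{
	return (len + bytes_per_tick - 1) / bytes_per_tick;
}

int main(void)
{
	long buffer = 100;           /* bucket depth, in ticks */
	long rate   = 15;            /* bytes per tick */
	int mtu     = 1514, max_size;

	for (max_size = mtu; max_size > 0; max_size--)
		if (txtime(max_size, rate) <= buffer)
			break;

	printf("largest packet that can ever be sent: %d bytes\n", max_size);
	return 0;
}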
@@ -328,10 +369,10 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_tbf_qopt opt;
-
+
rta = (struct rtattr*)b;
RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
-
+
opt.limit = q->limit;
opt.rate = q->R_tab->rate;
if (q->P_tab)
@@ -366,6 +407,7 @@ struct Qdisc_ops tbf_qdisc_ops =
tbf_init,
tbf_reset,
tbf_destroy,
+ tbf_change,
#ifdef CONFIG_RTNETLINK
tbf_dump,
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 212e6f696..66040d5e9 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -444,6 +444,7 @@ static struct teql_master the_master = {
teql_qdisc_init,
teql_reset,
teql_destroy,
+ NULL,
},};
diff --git a/net/socket.c b/net/socket.c
index a6d741e82..181effb79 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -41,6 +41,7 @@
* Kevin Buhr : Fixed the dumb errors in the above.
* Andi Kleen : Some small cleanups, optimizations,
* and fixed a copy_from_user() bug.
+ * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0)
*
*
* This program is free software; you can redistribute it and/or
@@ -278,8 +279,8 @@ struct socket *sock_alloc(void)
inode->i_mode = S_IFSOCK|S_IRWXUGO;
inode->i_sock = 1;
- inode->i_uid = current->uid;
- inode->i_gid = current->gid;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
sock->inode = inode;
init_waitqueue(&sock->wait);
@@ -929,40 +930,6 @@ asmlinkage int sys_getpeername(int fd, struct sockaddr *usockaddr, int *usockadd
}
/*
- * Send a datagram down a socket. The datagram as with write() is
- * in user space. We check it can be read.
- */
-
-asmlinkage int sys_send(int fd, void * buff, size_t len, unsigned flags)
-{
- struct socket *sock;
- int err;
- struct msghdr msg;
- struct iovec iov;
-
- lock_kernel();
- sock = sockfd_lookup(fd, &err);
- if (sock) {
- iov.iov_base=buff;
- iov.iov_len=len;
- msg.msg_name=NULL;
- msg.msg_namelen=0;
- msg.msg_iov=&iov;
- msg.msg_iovlen=1;
- msg.msg_control=NULL;
- msg.msg_controllen=0;
- if (sock->file->f_flags & O_NONBLOCK)
- flags |= MSG_DONTWAIT;
- msg.msg_flags = flags;
- err = sock_sendmsg(sock, &msg, len);
-
- sockfd_put(sock);
- }
- unlock_kernel();
- return err;
-}
-
-/*
* Send a datagram to a given address. We move the address into kernel
* space and check the user space data area is readable before invoking
* the protocol.
@@ -1008,6 +975,14 @@ out:
return err;
}
+/*
+ * Send a datagram down a socket.
+ */
+
+asmlinkage int sys_send(int fd, void * buff, size_t len, unsigned flags)
+{
+ return sys_sendto(fd, buff, len, flags, NULL, 0);
+}
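Aside (illustrative, not part of the patch): after this change sys_send() is literally sys_sendto() with a NULL destination, which is also the user-visible contract; on a connected socket the two calls are interchangeable. A small user-space check:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2], i;
	char buf[64];
	ssize_t n;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
		return 1;

	/* equivalent: send(fd, ...) == sendto(fd, ..., NULL, 0) */
	send(sv[0], "via send", 8, 0);
	sendto(sv[0], "via sendto", 10, 0, NULL, 0);

	for (i = 0; i < 2; i++) {
		n = recv(sv[1], buf, sizeof(buf) - 1, 0);
		if (n < 0)
			break;
		buf[n] = '\0';
		printf("got: %s\n", buf);
	}
	close(sv[0]);
	close(sv[1]);
	return 0;
}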
/*
* Receive a frame from the socket and optionally record the address of the
@@ -1059,7 +1034,7 @@ out:
asmlinkage int sys_recv(int fd, void * ubuf, size_t size, unsigned flags)
{
- return sys_recvfrom(fd,ubuf,size,flags, NULL, NULL);
+ return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
}
/*
@@ -1160,7 +1135,7 @@ asmlinkage int sys_sendmsg(int fd, struct msghdr *msg, unsigned flags)
/* Check whether to allocate the iovec area*/
err = -ENOMEM;
iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
- if (msg_sys.msg_iovlen > 1 /* UIO_FASTIOV */) {
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
if (!iov)
goto out_put;
@@ -1172,6 +1147,11 @@ asmlinkage int sys_sendmsg(int fd, struct msghdr *msg, unsigned flags)
goto out_freeiov;
total_len = err;
+ err = -ENOBUFS;
+
+ /* msg_controllen must fit to int */
+ if (msg_sys.msg_controllen > INT_MAX)
+ goto out_freeiov;
ctl_len = msg_sys.msg_controllen;
if (ctl_len)
{
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 96d81dd5a..2b8db00cc 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -175,7 +175,8 @@ rpcauth_lookup_credcache(struct rpc_task *task)
if (!cred)
cred = auth->au_ops->crcreate(task);
- rpcauth_insert_credcache(auth, cred);
+ if (cred)
+ rpcauth_insert_credcache(auth, cred);
return (struct rpc_cred *) cred;
}
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index fcd63b5a2..be6d19637 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -49,8 +49,11 @@ nul_create_cred(struct rpc_task *task)
{
struct rpc_cred *cred;
- if (!(cred = (struct rpc_cred *) rpc_malloc(task, sizeof(*cred))))
+ if (!(cred = (struct rpc_cred *) rpc_malloc(task, sizeof(*cred)))) {
+ task->tk_status = -ENOMEM;
return NULL;
+ }
+
cred->cr_count = 0;
cred->cr_flags = RPCAUTH_CRED_UPTODATE;
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 2e22c6461..6912c229d 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -68,8 +68,10 @@ unx_create_cred(struct rpc_task *task)
dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
current->uid, current->gid);
- if (!(cred = (struct unx_cred *) rpc_malloc(task, sizeof(*cred))))
+ if (!(cred = (struct unx_cred *) rpc_malloc(task, sizeof(*cred)))) {
+ task->tk_status = -ENOMEM;
return NULL;
+ }
cred->uc_count = 0;
cred->uc_flags = RPCAUTH_CRED_UPTODATE;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 9d4bcb118..222a3f9ec 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -174,6 +174,7 @@ rpc_make_runnable(struct rpc_task *task)
printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
return;
}
+ task->tk_flags |= RPC_TASK_RUNNING;
if (RPC_IS_ASYNC(task)) {
int status;
status = rpc_add_wait_queue(&schedq, task);
@@ -186,7 +187,6 @@ rpc_make_runnable(struct rpc_task *task)
} else {
wake_up(&task->tk_wait);
}
- task->tk_flags |= RPC_TASK_RUNNING;
}
@@ -447,7 +447,10 @@ __rpc_execute(struct rpc_task *task)
task->tk_pid);
if (current->pid == rpciod_pid)
printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");
- sleep_on(&task->tk_wait);
+
+ sti();
+ __wait_event(task->tk_wait, RPC_IS_RUNNING(task));
+ cli();
/*
* When the task received a signal, remove from
@@ -1000,8 +1003,8 @@ void rpc_show_tasks(void)
wreq->wb_flags, wreq->wb_pid, wreq->wb_page,
wreq->wb_offset, wreq->wb_bytes);
printk(" name=%s/%s\n",
- wreq->wb_dentry->d_parent->d_name.name,
- wreq->wb_dentry->d_name.name);
+ wreq->wb_file->f_dentry->d_parent->d_name.name,
+ wreq->wb_file->f_dentry->d_name.name);
}
}
#endif
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 4e0acee23..d2248ad74 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -24,6 +24,7 @@
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
+#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/unistd.h>
@@ -248,7 +249,8 @@ svc_sendto(struct svc_rqst *rqstp, struct iovec *iov, int nr)
msg.msg_namelen = sizeof(rqstp->rq_addr);
msg.msg_iov = iov;
msg.msg_iovlen = nr;
- msg.msg_control = 0;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
#if LINUX_VERSION_CODE >= 0x020100
msg.msg_flags = MSG_DONTWAIT;
@@ -307,7 +309,8 @@ svc_recvfrom(struct svc_rqst *rqstp, struct iovec *iov, int nr, int buflen)
msg.msg_namelen = sizeof(rqstp->rq_addr);
msg.msg_iov = iov;
msg.msg_iovlen = nr;
- msg.msg_control = 0;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
#if LINUX_VERSION_CODE >= 0x020100
msg.msg_flags = MSG_DONTWAIT;
@@ -546,14 +549,14 @@ svc_tcp_accept(struct svc_sock *svsk)
* we just punt connects from unprivileged ports. */
if (ntohs(sin.sin_port) >= 1024) {
printk(KERN_WARNING
- "%s: connect from unprivileged port: %08lx:%d",
+ "%s: connect from unprivileged port: %s:%d",
serv->sv_name,
- ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+ in_ntoa(sin.sin_addr.s_addr), ntohs(sin.sin_port));
goto failed;
}
- dprintk("%s: connect from %08lx:%04x\n", serv->sv_name,
- ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+ dprintk("%s: connect from %s:%04x\n", serv->sv_name,
+ in_ntoa(sin.sin_addr.s_addr), ntohs(sin.sin_port));
if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
goto failed;
@@ -610,7 +613,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
unsigned long want = 4 - svsk->sk_tcplen;
struct iovec iov;
- iov.iov_base = ((u32 *) &svsk->sk_reclen) + svsk->sk_tcplen;
+ iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
goto error;
@@ -620,11 +623,11 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
if (!(svsk->sk_reclen & 0x80000000)) {
/* FIXME: shutdown socket */
printk(KERN_NOTICE "RPC: bad TCP reclen %08lx",
- (unsigned long) svsk->sk_reclen);
+ (unsigned long) svsk->sk_reclen);
return -EIO;
}
svsk->sk_reclen &= 0x7fffffff;
- dprintk("svc: TCP record, %ld bytes\n", svsk->sk_reclen);
+ dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
}
/* Check whether enough data is available */
@@ -633,8 +636,8 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
goto error;
if (len < svsk->sk_reclen) {
- dprintk("svc: incomplete TCP record (%d of %ld)\n",
- len, svsk->sk_reclen);
+ dprintk("svc: incomplete TCP record (%d of %d)\n",
+ len, svsk->sk_reclen);
svc_sock_received(svsk, ready);
len = -EAGAIN; /* record not complete */
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f7a10dcda..1e5ad01c3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -200,6 +200,7 @@ xprt_sendmsg(struct rpc_xprt *xprt)
msg.msg_name = (struct sockaddr *) &xprt->addr;
msg.msg_namelen = sizeof(xprt->addr);
msg.msg_control = NULL;
+ msg.msg_controllen = 0;
/* Dont repeat bytes */
@@ -256,6 +257,7 @@ xprt_recvmsg(struct rpc_xprt *xprt, struct iovec *iov, int nr, int len)
msg.msg_name = &sin;
msg.msg_namelen = sizeof(sin);
msg.msg_control = NULL;
+ msg.msg_controllen = 0;
oldfs = get_fs(); set_fs(get_ds());
result = sock_recvmsg(sock, &msg, len, MSG_DONTWAIT);
@@ -268,6 +270,7 @@ xprt_recvmsg(struct rpc_xprt *xprt, struct iovec *iov, int nr, int len)
msg.msg_name = &sin;
msg.msg_namelen = sizeof(sin);
msg.msg_control = NULL;
+ msg.msg_controllen = 0;
oldfs = get_fs(); set_fs(get_ds());
result = sock->ops->recvmsg(sock, &msg, len, 1, 0, &alen);
@@ -1197,7 +1200,7 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
static u32 xid = 0;
if (!xid)
- xid = jiffies;
+ xid = CURRENT_TIME << 12;
dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, req, xid);
task->tk_status = 0;
@@ -1206,6 +1209,8 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
req->rq_task = task;
req->rq_xprt = xprt;
req->rq_xid = xid++;
+ if (!xid)
+ xid++;
}
/*
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ae33770fe..21614a3c6 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.73 1999/01/15 06:55:48 davem Exp $
+ * Version: $Id: af_unix.c,v 1.76 1999/05/08 05:54:55 davem Exp $
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
@@ -33,6 +33,16 @@
* Lots of bug fixes.
* Alexey Kuznetsov : Repaired (I hope) bugs introduced
* by above two patches.
+ * Andrea Arcangeli : If possible we block in connect(2)
+ * if the max backlog of the listen socket
+ * has been reached. This won't break
+ * old apps and it avoids a huge amount
+ * of hashed socks (for unix_gc()
+ * performance reasons).
+ * Security fix that limits the max
+ * number of socks to 2*max_files and
+ * the number of skbs queueable in the
+ * dgram receiver.
*
* Known differences from reference BSD that was tested:
*
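Aside (illustrative, not part of the patch): the behaviour described in the new changelog entry is visible from user space. Once a listener's backlog is full and nobody calls accept(), a further non-blocking connect() should fail with EAGAIN (a blocking one sleeps) rather than piling up unaccepted sockets. A rough test program; the socket path is an arbitrary scratch name and the exact error codes depend on the kernel version:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un a;
	int lst, i;

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	strcpy(a.sun_path, "/tmp/backlog-test");    /* arbitrary scratch path */
	unlink(a.sun_path);

	lst = socket(AF_UNIX, SOCK_STREAM, 0);
	if (bind(lst, (struct sockaddr *)&a, sizeof(a)) < 0 || listen(lst, 1) < 0)
		return 1;                           /* tiny backlog, never accept()ed */

	for (i = 0; i < 5; i++) {
		int c = socket(AF_UNIX, SOCK_STREAM, 0);
		fcntl(c, F_SETFL, O_NONBLOCK);
		if (connect(c, (struct sockaddr *)&a, sizeof(a)) < 0)
			printf("connect %d failed: %s\n", i, strerror(errno));
		else
			printf("connect %d succeeded\n", i);
	}
	unlink(a.sun_path);
	return 0;
}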
@@ -100,8 +110,12 @@
int sysctl_unix_delete_delay = HZ;
int sysctl_unix_destroy_delay = 10*HZ;
+int sysctl_unix_max_dgram_qlen = 10;
unix_socket *unix_socket_table[UNIX_HASH_SIZE+1];
+static atomic_t unix_nr_socks = ATOMIC_INIT(0);
+static struct wait_queue * unix_ack_wqueue = NULL;
+static struct wait_queue * unix_dgram_wqueue = NULL;
#define unix_sockets_unbound (unix_socket_table[UNIX_HASH_SIZE])
@@ -263,6 +277,8 @@ static void unix_destroy_timer(unsigned long data)
unix_socket *sk=(unix_socket *)data;
if(!unix_locked(sk) && atomic_read(&sk->wmem_alloc) == 0)
{
+ atomic_dec(&unix_nr_socks);
+
sk_free(sk);
/* socket destroyed, decrement count */
@@ -295,13 +311,18 @@ static int unix_release_sock (unix_socket *sk)
sk->dead=1;
sk->socket = NULL;
+ if (sk->state == TCP_LISTEN)
+ wake_up_interruptible(&unix_ack_wqueue);
+ if (sk->type == SOCK_DGRAM)
+ wake_up_interruptible(&unix_dgram_wqueue);
+
skpair=unix_peer(sk);
if (skpair!=NULL)
{
if (sk->type==SOCK_STREAM && unix_our_peer(sk, skpair))
{
- skpair->state_change(skpair);
+ skpair->data_ready(skpair,0);
skpair->shutdown=SHUTDOWN_MASK; /* No more writes*/
}
unix_unlock(skpair); /* It may now die */
@@ -347,6 +368,8 @@ static void unix_destroy_socket(unix_socket *sk)
if(!unix_locked(sk) && atomic_read(&sk->wmem_alloc) == 0)
{
+ atomic_dec(&unix_nr_socks);
+
sk_free(sk);
/* socket destroyed, decrement count */
@@ -371,6 +394,8 @@ static int unix_listen(struct socket *sock, int backlog)
return -EOPNOTSUPP; /* Only stream sockets accept */
if (!sk->protinfo.af_unix.addr)
return -EINVAL; /* No listens on an unbound socket */
+ if ((unsigned) backlog > SOMAXCONN)
+ backlog = SOMAXCONN;
sk->max_ack_backlog=backlog;
sk->state=TCP_LISTEN;
sock->flags |= SO_ACCEPTCON;
@@ -388,6 +413,9 @@ static struct sock * unix_create1(struct socket *sock, int stream)
{
struct sock *sk;
+ if (atomic_read(&unix_nr_socks) >= 2*max_files)
+ return NULL;
+
MOD_INC_USE_COUNT;
sk = sk_alloc(PF_UNIX, GFP_KERNEL, 1);
if (!sk) {
@@ -395,6 +423,8 @@ static struct sock * unix_create1(struct socket *sock, int stream)
return NULL;
}
+ atomic_inc(&unix_nr_socks);
+
sock_init_data(sock,sk);
if (stream)
@@ -673,9 +703,25 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
we will have to recheck all again in any case.
*/
+restart:
/* Find listening sock */
other=unix_find_other(sunaddr, addr_len, sk->type, hash, &err);
+ if (!other)
+ return -ECONNREFUSED;
+
+ while (other->ack_backlog >= other->max_ack_backlog) {
+ unix_unlock(other);
+ if (other->dead || other->state != TCP_LISTEN)
+ return -ECONNREFUSED;
+ if (flags & O_NONBLOCK)
+ return -EAGAIN;
+ interruptible_sleep_on(&unix_ack_wqueue);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ goto restart;
+ }
+
/* create new sock for complete connection */
newsk = unix_create1(NULL, 1);
@@ -704,7 +750,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
/* Check that listener is in valid state. */
err = -ECONNREFUSED;
- if (other == NULL || other->dead || other->state != TCP_LISTEN)
+ if (other->dead || other->state != TCP_LISTEN)
goto out;
err = -ENOMEM;
@@ -815,11 +861,10 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
continue;
}
tsk = skb->sk;
- sk->ack_backlog--;
+ if (sk->max_ack_backlog == sk->ack_backlog--)
+ wake_up_interruptible(&unix_ack_wqueue);
kfree_skb(skb);
- if (!tsk->dead)
- break;
- unix_release_sock(tsk);
+ break;
}
@@ -947,6 +992,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, int len,
* Check with 1003.1g - what should
* datagram error
*/
+ dead:
unix_unlock(other);
unix_peer(sk)=NULL;
other = NULL;
@@ -964,6 +1010,29 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, int len,
goto out_unlock;
}
+ while (skb_queue_len(&other->receive_queue) >=
+ sysctl_unix_max_dgram_qlen)
+ {
+ if (sock->file->f_flags & O_NONBLOCK)
+ {
+ err = -EAGAIN;
+ goto out_unlock;
+ }
+ interruptible_sleep_on(&unix_dgram_wqueue);
+ if (other->dead)
+ goto dead;
+ if (sk->shutdown & SEND_SHUTDOWN)
+ {
+ err = -EPIPE;
+ goto out_unlock;
+ }
+ if (signal_pending(current))
+ {
+ err = -ERESTARTSYS;
+ goto out_unlock;
+ }
+ }
+
skb_queue_tail(&other->receive_queue, skb);
other->data_ready(other,len);
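Aside (illustrative, not part of the patch): the sleeping loop added above caps a datagram socket's receive queue at sysctl_unix_max_dgram_qlen (10 by default here), so a non-blocking sender sees EAGAIN once the receiver stops draining. A quick user-space probe of that backpressure; the path is an arbitrary scratch name and the count at which sending stops varies between kernel versions (without this change only the socket buffer sizes bound the queue):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un a;
	int rx, tx, n = 0;

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	strcpy(a.sun_path, "/tmp/dgram-qlen-test"); /* arbitrary scratch path */
	unlink(a.sun_path);

	rx = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (bind(rx, (struct sockaddr *)&a, sizeof(a)) < 0)
		return 1;                           /* receiver never reads */

	tx = socket(AF_UNIX, SOCK_DGRAM, 0);
	fcntl(tx, F_SETFL, O_NONBLOCK);

	while (sendto(tx, "x", 1, 0, (struct sockaddr *)&a, sizeof(a)) == 1)
		n++;

	printf("queued %d datagrams before: %s\n", n, strerror(errno));
	unlink(a.sun_path);
	return 0;
}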
@@ -1126,6 +1195,13 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, int size,
if (!skb)
goto out;
+ /*
+ * sysctl_unix_max_dgram_qlen may have changed while we were blocked
+ * in the waitqueue, so we must wake up waiters every time we shrink
+ * the receiver queue. -arca
+ */
+ wake_up_interruptible(&unix_dgram_wqueue);
+
if (msg->msg_name)
{
msg->msg_namelen = sizeof(short);
@@ -1333,7 +1409,10 @@ static int unix_shutdown(struct socket *sock, int mode)
if (mode&SEND_SHUTDOWN)
peer_mode |= RCV_SHUTDOWN;
other->shutdown |= peer_mode;
- other->state_change(other);
+ if (peer_mode&RCV_SHUTDOWN)
+ other->data_ready(other,0);
+ else
+ other->state_change(other);
}
}
return 0;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 3dcc2cada..4f659bd9f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -17,11 +17,12 @@
*
* - explicit stack instead of recursion
* - tail recurse on first born instead of immediate push/pop
+ * - we gather the stuff that should not be killed into a tree,
+ * and the stack is just the path from the root to the current pointer.
*
* Future optimizations:
*
* - don't just push entire root set; process in place
- * - use linked list for internal stack
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -48,8 +49,18 @@
* such socket and closed it (descriptor). That would happen on
* each unix_gc() until the accept(). Since the struct file in
* question would go to the free list and might be reused...
- * That might be the reason of random oopses on close_fp() in
- * unrelated processes.
+ * That might be the reason for random oopses on filp_close()
+ * in unrelated processes.
+ *
+ * AV 28 Feb 1999
+ * Kill the explicit allocation of stack. Now we keep the tree
+ * with root in dummy + pointer (gc_current) to one of the nodes.
+ * Stack is represented as path from gc_current to dummy. Unmark
+ * now means "add to tree". Push == "make it a son of gc_current".
+ * Pop == "move gc_current to parent". We keep only pointers to
+ * parents (->gc_tree).
+ * AV 1 Mar 1999
+ * Damn. Added missing check for ->dead in listen queues scanning.
*
*/
@@ -65,7 +76,6 @@
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -74,9 +84,8 @@
/* Internal data structures and random procedures: */
-static unix_socket **stack; /* stack of objects to mark */
-static int in_stack = 0; /* first free entry in stack */
-static int max_stack; /* Top of stack */
+#define GC_HEAD ((unix_socket *)(-1))
+static unix_socket *gc_current=GC_HEAD; /* stack of objects to mark */
extern inline unix_socket *unix_get_socket(struct file *filp)
{
@@ -122,32 +131,25 @@ void unix_notinflight(struct file *fp)
/*
* Garbage Collector Support Functions
*/
-
-extern inline void push_stack(unix_socket *x)
-{
- if (in_stack == max_stack)
- panic("can't push onto full stack");
- stack[in_stack++] = x;
-}
extern inline unix_socket *pop_stack(void)
{
- if (in_stack == 0)
- panic("can't pop empty gc stack");
- return stack[--in_stack];
+ unix_socket *p=gc_current;
+ gc_current = p->protinfo.af_unix.gc_tree;
+ return p;
}
extern inline int empty_stack(void)
{
- return in_stack == 0;
+ return gc_current == GC_HEAD;
}
extern inline void maybe_unmark_and_push(unix_socket *x)
{
- if (!(x->protinfo.af_unix.marksweep&MARKED))
+ if (x->protinfo.af_unix.gc_tree)
return;
- x->protinfo.af_unix.marksweep&=~MARKED;
- push_stack(x);
+ x->protinfo.af_unix.gc_tree = gc_current;
+ gc_current = x;
}
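Aside (illustrative, not part of the patch): the explicit mark stack disappears because each pushed socket's ->gc_tree pointer records the previous top of stack, so the stack is just the chain of parent pointers ending at the GC_HEAD sentinel, and a NULL ->gc_tree doubles as "not yet marked". A stripped-down user-space model of that pointer trick, with invented struct names:

#include <stdio.h>

#define GC_HEAD ((struct node *)-1)     /* sentinel marking the stack bottom */

struct node {
	const char *name;
	struct node *gc_tree;           /* NULL = not yet pushed/marked */
};

static struct node *gc_current = GC_HEAD;

static void maybe_push(struct node *x)
{
	if (x->gc_tree)                 /* already in the tree: skip */
		return;
	x->gc_tree = gc_current;        /* remember the previous top */
	gc_current = x;
}

static struct node *pop(void)
{
	struct node *p = gc_current;
	gc_current = p->gc_tree;        /* walk back up the parent chain */
	return p;
}

int main(void)
{
	struct node a = { "a", NULL }, b = { "b", NULL };

	maybe_push(&a);
	maybe_push(&b);
	maybe_push(&a);                 /* duplicate push is ignored */

	while (gc_current != GC_HEAD)
		printf("popped %s\n", pop()->name);
	return 0;
}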
@@ -169,23 +171,9 @@ void unix_gc(void)
return;
in_unix_gc=1;
- if(stack==NULL || max_files>max_stack)
- {
- if(stack)
- vfree(stack);
- stack=(unix_socket **)vmalloc(max_files*sizeof(struct unix_socket *));
- if(stack==NULL)
- {
- printk(KERN_NOTICE "unix_gc: deferred due to low memory.\n");
- in_unix_gc=0;
- return;
- }
- max_stack=max_files;
- }
-
forall_unix_sockets(i, s)
{
- s->protinfo.af_unix.marksweep|=MARKED;
+ s->protinfo.af_unix.gc_tree=NULL;
}
/*
* Everything is now marked
@@ -262,7 +250,7 @@ tail:
}
}
/* We have to scan not-yet-accepted ones too */
- if (UNIXCB(skb).attr & MSG_SYN) {
+ if ((UNIXCB(skb).attr & MSG_SYN) && !skb->sk->dead) {
if (f==NULL)
f=skb->sk;
else
@@ -276,9 +264,9 @@ tail:
if (f)
{
- if ((f->protinfo.af_unix.marksweep&MARKED))
+ if (!f->protinfo.af_unix.gc_tree)
{
- f->protinfo.af_unix.marksweep&=~MARKED;
+ f->protinfo.af_unix.gc_tree=GC_HEAD;
x=f;
f=NULL;
goto tail;
@@ -290,7 +278,7 @@ tail:
forall_unix_sockets(i, s)
{
- if (s->protinfo.af_unix.marksweep&MARKED)
+ if (!s->protinfo.af_unix.gc_tree)
{
struct sk_buff *nextsk;
skb=skb_peek(&s->receive_queue);
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index d492e8e2b..2f06a3643 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -19,6 +19,7 @@
extern int sysctl_unix_destroy_delay;
extern int sysctl_unix_delete_delay;
+extern int sysctl_unix_max_dgram_qlen;
ctl_table unix_table[] = {
{NET_UNIX_DESTROY_DELAY, "destroy_delay",
@@ -27,6 +28,9 @@ ctl_table unix_table[] = {
{NET_UNIX_DELETE_DELAY, "delete_delay",
&sysctl_unix_delete_delay, sizeof(int), 0644, NULL,
&proc_dointvec_jiffies},
+ {NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen",
+ &sysctl_unix_max_dgram_qlen, sizeof(int), 0600, NULL,
+ &proc_dointvec_jiffies},
{0}
};