Diffstat (limited to 'net/inet')
-rw-r--r--  net/inet/Makefile        68
-rw-r--r--  net/inet/README          21
-rw-r--r--  net/inet/af_inet.c     1410
-rw-r--r--  net/inet/arp.c         1228
-rw-r--r--  net/inet/arp.h           18
-rw-r--r--  net/inet/datagram.c     209
-rw-r--r--  net/inet/datalink.h      17
-rw-r--r--  net/inet/dev.c         1472
-rw-r--r--  net/inet/devinet.c      214
-rw-r--r--  net/inet/eth.c          196
-rw-r--r--  net/inet/eth.h           35
-rw-r--r--  net/inet/icmp.c         748
-rw-r--r--  net/inet/icmp.h          38
-rw-r--r--  net/inet/ip.c          2028
-rw-r--r--  net/inet/ip.h            91
-rw-r--r--  net/inet/ipx.c         1360
-rw-r--r--  net/inet/ipx.h           71
-rw-r--r--  net/inet/ipxcall.h        2
-rw-r--r--  net/inet/p8022.c         97
-rw-r--r--  net/inet/p8022.h          2
-rw-r--r--  net/inet/p8022call.h      2
-rw-r--r--  net/inet/p8023.c         34
-rw-r--r--  net/inet/packet.c       391
-rw-r--r--  net/inet/pe2.c           34
-rw-r--r--  net/inet/proc.c         222
-rw-r--r--  net/inet/protocol.c     159
-rw-r--r--  net/inet/protocol.h      59
-rw-r--r--  net/inet/rarp.c         490
-rw-r--r--  net/inet/rarp.h          14
-rw-r--r--  net/inet/raw.c          354
-rw-r--r--  net/inet/raw.h           36
-rw-r--r--  net/inet/route.c        657
-rw-r--r--  net/inet/route.h         53
-rw-r--r--  net/inet/skbuff.c       563
-rw-r--r--  net/inet/snmp.h         107
-rw-r--r--  net/inet/sock.c         544
-rw-r--r--  net/inet/sock.h         287
-rw-r--r--  net/inet/tcp.c         4582
-rw-r--r--  net/inet/tcp.h          134
-rw-r--r--  net/inet/timer.c        262
-rw-r--r--  net/inet/udp.c          672
-rw-r--r--  net/inet/udp.h           50
-rw-r--r--  net/inet/utils.c         91
43 files changed, 19122 insertions, 0 deletions
diff --git a/net/inet/Makefile b/net/inet/Makefile
new file mode 100644
index 000000000..95af29230
--- /dev/null
+++ b/net/inet/Makefile
@@ -0,0 +1,68 @@
+#
+# Makefile for the Linux TCP/IP (INET) layer.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+.c.o:
+ $(CC) $(CFLAGS) -c $<
+.s.o:
+ $(AS) -o $*.o $<
+.c.s:
+ $(CC) $(CFLAGS) -S $<
+
+
+OBJS := sock.o eth.o dev.o skbuff.o datagram.o
+
+ifdef CONFIG_INET
+
+OBJS := $(OBJS) utils.o route.o proc.o timer.o protocol.o packet.o \
+ arp.o ip.o raw.o icmp.o tcp.o udp.o devinet.o af_inet.o
+
+endif
+
+ifdef CONFIG_INET_RARP
+
+OBJS := $(OBJS) rarp.o
+
+endif
+
+ifdef CONFIG_AX25
+
+OBJS := $(OBJS) ax25.o ax25_in.o ax25_out.o ax25_route.o ax25_subr.o ax25_timer.o
+
+endif
+
+ifdef CONFIG_IPX
+
+OBJS := $(OBJS) ipx.o pe2.o p8022.o p8023.o
+
+endif
+
+ifdef CONFIG_NET
+
+inet.o: $(OBJS)
+ $(LD) -r -o inet.o $(OBJS)
+
+else
+
+inet.o:
+ echo | $(AS) -o inet.o
+
+endif
+
+dep:
+ $(CPP) -M *.c > .depend
+
+tar:
+ tar -cvf /dev/f1 .
+
+#
+# include a dependency file if one exists
+#
+ifeq (.depend,$(wildcard .depend))
+include .depend
+endif
diff --git a/net/inet/README b/net/inet/README
new file mode 100644
index 000000000..005795df0
--- /dev/null
+++ b/net/inet/README
@@ -0,0 +1,21 @@
+Changes for NET3.017
+
+This is mostly small stuff as follows:
+
+o accept()ed sockets don't end up with an invalid sk->socket and give bogus
+ netstat output.
+o FASYNC/SIGIO now works with sockets.
+o Fixed the permissions on F_SETOWN for all. It's now as broken/working
+ as other systems. Really we need something like a 32bit generation
+ number on processes.
+o ARP allows proxy for whole networks (a la cisco routers)
+o TCP sendto() reports ENOTCONN in the right cases
+o Removed some surplus uncommented code from tcp.c
+o Fixed protocol violation during closedown in tcp.c
+ [Still not got the window < MSS bug fix included]
+
+Fixes for 1.1.58
+
+o non-blocking connect failure gets the error code right.
+o select() not reporting read OK after an urgent read has been fixed.
+
diff --git a/net/inet/af_inet.c b/net/inet/af_inet.c
new file mode 100644
index 000000000..8e7739611
--- /dev/null
+++ b/net/inet/af_inet.c
@@ -0,0 +1,1410 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * AF_INET protocol family socket handler.
+ *
+ * Version: @(#)af_inet.c (from sock.c) 1.0.17 06/02/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche, <flla@stud.uni-sb.de>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ *
+ * Changes (see also sock.c)
+ *
+ * A.N.Kuznetsov : Socket death error in accept().
+ * John Richardson : Fix non blocking error in connect()
+ * so sockets that fail to connect
+ * don't return -EINPROGRESS.
+ * Alan Cox : Asynchronous I/O support
+ * Alan Cox : Keep correct socket pointer on sock structures
+ * when accept() ed
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include "arp.h"
+#include "rarp.h"
+#include "route.h"
+#include "tcp.h"
+#include "udp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "raw.h"
+#include "icmp.h"
+
+#define min(a,b) ((a)<(b)?(a):(b))
+
+extern struct proto packet_prot;
+
+
+/*
+ * See if a socket number is in use.
+ */
+
+static int sk_inuse(struct proto *prot, int num)
+{
+ struct sock *sk;
+
+ for(sk = prot->sock_array[num & (SOCK_ARRAY_SIZE -1 )];
+ sk != NULL; sk=sk->next)
+ {
+ if (sk->num == num)
+ return(1);
+ }
+ return(0);
+}
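
The lookup above relies on SOCK_ARRAY_SIZE being a power of two: the low bits
of the port number select a bucket in prot->sock_array and collisions chain on
->next. A minimal stand-alone sketch of that bucket walk; the array size and
struct below are simplified stand-ins for illustration, not the kernel's own
definitions.

#define DEMO_SOCK_ARRAY_SIZE 256		/* assumed; must be a power of two */

struct demo_sock {
	unsigned short num;			/* bound local port number */
	struct demo_sock *next;			/* next socket in the same bucket */
};

static int demo_port_in_use(struct demo_sock *array[], unsigned short num)
{
	struct demo_sock *sk;

	/* the low bits of the port pick the bucket; walk its collision chain */
	for (sk = array[num & (DEMO_SOCK_ARRAY_SIZE - 1)]; sk != NULL; sk = sk->next)
		if (sk->num == num)
			return 1;
	return 0;
}
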
+
+
+/*
+ * Pick a new socket number
+ */
+
+unsigned short get_new_socknum(struct proto *prot, unsigned short base)
+{
+ static int start=0;
+
+ /*
+ * Used to cycle through the port numbers so the
+ * chances of a confused connection drop.
+ */
+
+ int i, j;
+ int best = 0;
+ int size = 32767; /* a big num. */
+ struct sock *sk;
+
+ if (base == 0)
+ base = PROT_SOCK+1+(start % 1024);
+ if (base <= PROT_SOCK)
+ {
+ base += PROT_SOCK+(start % 1024);
+ }
+
+ /* Now look through the entire array and try to find an empty ptr. */
+ for(i=0; i < SOCK_ARRAY_SIZE; i++)
+ {
+ j = 0;
+ sk = prot->sock_array[(i+base+1) &(SOCK_ARRAY_SIZE -1)];
+ while(sk != NULL)
+ {
+ sk = sk->next;
+ j++;
+ }
+ if (j == 0)
+ {
+ start =(i+1+start )%1024;
+ return(i+base+1);
+ }
+ if (j < size)
+ {
+ best = i;
+ size = j;
+ }
+ }
+
+ /* Now make sure the one we want is not in use. */
+
+ while(sk_inuse(prot, base +best+1))
+ {
+ best += SOCK_ARRAY_SIZE;
+ }
+ return(best+base+1);
+}
+
+/*
+ * Add a socket into the socket tables by number.
+ */
+
+void put_sock(unsigned short num, struct sock *sk)
+{
+ struct sock *sk1;
+ struct sock *sk2;
+ int mask;
+
+ sk->num = num;
+ sk->next = NULL;
+ num = num &(SOCK_ARRAY_SIZE -1);
+
+ /* We can't have an interrupt re-enter here. */
+ cli();
+ if (sk->prot->sock_array[num] == NULL)
+ {
+ sk->prot->sock_array[num] = sk;
+ sti();
+ return;
+ }
+ sti();
+ for(mask = 0xff000000; mask != 0xffffffff; mask = (mask >> 8) | mask)
+ {
+ if ((mask & sk->saddr) &&
+ (mask & sk->saddr) != (mask & 0xffffffff))
+ {
+ mask = mask << 8;
+ break;
+ }
+ }
+ cli();
+ sk1 = sk->prot->sock_array[num];
+ for(sk2 = sk1; sk2 != NULL; sk2=sk2->next)
+ {
+ if (!(sk2->saddr & mask))
+ {
+ if (sk2 == sk1)
+ {
+ sk->next = sk->prot->sock_array[num];
+ sk->prot->sock_array[num] = sk;
+ sti();
+ return;
+ }
+ sk->next = sk2;
+ sk1->next= sk;
+ sti();
+ return;
+ }
+ sk1 = sk2;
+ }
+
+ /* Goes at the end. */
+ sk->next = NULL;
+ sk1->next = sk;
+ sti();
+}
+
+/*
+ * Remove a socket from the socket tables.
+ */
+
+static void remove_sock(struct sock *sk1)
+{
+ struct sock *sk2;
+
+ if (!sk1->prot)
+ {
+ printk("sock.c: remove_sock: sk1->prot == NULL\n");
+ return;
+ }
+
+ /* We can't have this changing out from under us. */
+ cli();
+ sk2 = sk1->prot->sock_array[sk1->num &(SOCK_ARRAY_SIZE -1)];
+ if (sk2 == sk1)
+ {
+ sk1->prot->sock_array[sk1->num &(SOCK_ARRAY_SIZE -1)] = sk1->next;
+ sti();
+ return;
+ }
+
+ while(sk2 && sk2->next != sk1)
+ {
+ sk2 = sk2->next;
+ }
+
+ if (sk2)
+ {
+ sk2->next = sk1->next;
+ sti();
+ return;
+ }
+ sti();
+}
+
+/*
+ * Destroy an AF_INET socket
+ */
+
+void destroy_sock(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ sk->inuse = 1; /* just to be safe. */
+
+ /* In case it's sleeping somewhere. */
+ if (!sk->dead)
+ sk->write_space(sk);
+
+ remove_sock(sk);
+
+ /* Now we can no longer get new packets. */
+ delete_timer(sk);
+
+ while ((skb = tcp_dequeue_partial(sk)) != NULL) {
+ IS_SKB(skb);
+ kfree_skb(skb, FREE_WRITE);
+ }
+
+ /* Clean up the write buffer. */
+ while((skb = skb_dequeue(&sk->write_queue)) != NULL) {
+ IS_SKB(skb);
+ kfree_skb(skb, FREE_WRITE);
+ }
+
+ /*
+ * Don't discard received data until the user side kills its
+ * half of the socket.
+ */
+
+ if (sk->dead)
+ {
+ while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
+ {
+ /*
+ * This will take care of closing sockets that were
+ * listening and didn't accept everything.
+ */
+ if (skb->sk != NULL && skb->sk != sk)
+ {
+ IS_SKB(skb);
+ skb->sk->dead = 1;
+ skb->sk->prot->close(skb->sk, 0);
+ }
+ IS_SKB(skb);
+ kfree_skb(skb, FREE_READ);
+ }
+ }
+
+ /* Now we need to clean up the send head. */
+ cli();
+ for(skb = sk->send_head; skb != NULL; )
+ {
+ struct sk_buff *skb2;
+
+ /*
+ * We need to remove skb from the transmit queue,
+ * or maybe the arp queue.
+ */
+ if (skb->next && skb->prev) {
+/* printk("destroy_sock: unlinked skb\n");*/
+ IS_SKB(skb);
+ skb_unlink(skb);
+ }
+ skb->dev = NULL;
+ skb2 = skb->link3;
+ kfree_skb(skb, FREE_WRITE);
+ skb = skb2;
+ }
+ sk->send_head = NULL;
+ sti();
+
+ /* And now the backlog. */
+ while((skb=skb_dequeue(&sk->back_log))!=NULL)
+ {
+ /* this should never happen. */
+/* printk("cleaning back_log\n");*/
+ kfree_skb(skb, FREE_READ);
+ }
+
+ /* Now if it has a half-accepted/closed socket. */
+ if (sk->pair)
+ {
+ sk->pair->dead = 1;
+ sk->pair->prot->close(sk->pair, 0);
+ sk->pair = NULL;
+ }
+
+ /*
+ * Now if everything is gone we can free the socket
+ * structure, otherwise we need to keep it around until
+ * everything is gone.
+ */
+
+ if (sk->dead && sk->rmem_alloc == 0 && sk->wmem_alloc == 0)
+ {
+ kfree_s((void *)sk,sizeof(*sk));
+ }
+ else
+ {
+ /* this should never happen. */
+ /* actually it can if an ack has just been sent. */
+ sk->destroy = 1;
+ sk->ack_backlog = 0;
+ sk->inuse = 0;
+ reset_timer(sk, TIME_DESTROY, SOCK_DESTROY_TIME);
+ }
+}
+
+/*
+ * The routines beyond this point handle the behaviour of an AF_INET
+ * socket object. Mostly it punts to the subprotocols of IP to do
+ * the work.
+ */
+
+static int inet_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk;
+
+ sk = (struct sock *) sock->data;
+
+ switch(cmd)
+ {
+ case F_SETOWN:
+ /*
+ * This is a little restrictive, but it's the only
+ * way to make sure that you can't send a sigurg to
+ * another process.
+ */
+ if (!suser() && current->pgrp != -arg &&
+ current->pid != arg) return(-EPERM);
+ sk->proc = arg;
+ return(0);
+ case F_GETOWN:
+ return(sk->proc);
+ default:
+ return(-EINVAL);
+ }
+}
+
+/*
+ * Set socket options on an inet socket.
+ */
+
+static int inet_setsockopt(struct socket *sock, int level, int optname,
+ char *optval, int optlen)
+{
+ struct sock *sk = (struct sock *) sock->data;
+ if (level == SOL_SOCKET)
+ return sock_setsockopt(sk,level,optname,optval,optlen);
+ if (sk->prot->setsockopt==NULL)
+ return(-EOPNOTSUPP);
+ else
+ return sk->prot->setsockopt(sk,level,optname,optval,optlen);
+}
+
+/*
+ * Get a socket option on an AF_INET socket.
+ */
+
+static int inet_getsockopt(struct socket *sock, int level, int optname,
+ char *optval, int *optlen)
+{
+ struct sock *sk = (struct sock *) sock->data;
+ if (level == SOL_SOCKET)
+ return sock_getsockopt(sk,level,optname,optval,optlen);
+ if(sk->prot->getsockopt==NULL)
+ return(-EOPNOTSUPP);
+ else
+ return sk->prot->getsockopt(sk,level,optname,optval,optlen);
+}
+
+/*
+ * Automatically bind an unbound socket.
+ */
+
+static int inet_autobind(struct sock *sk)
+{
+ /* We may need to bind the socket. */
+ if (sk->num == 0)
+ {
+ sk->num = get_new_socknum(sk->prot, 0);
+ if (sk->num == 0)
+ return(-EAGAIN);
+ put_sock(sk->num, sk);
+ sk->dummy_th.source = ntohs(sk->num);
+ }
+ return 0;
+}
+
+/*
+ * Move a socket into listening state.
+ */
+
+static int inet_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = (struct sock *) sock->data;
+
+ if(inet_autobind(sk)!=0)
+ return -EAGAIN;
+
+ /* We might as well reuse these. */
+ sk->max_ack_backlog = backlog;
+ if (sk->state != TCP_LISTEN)
+ {
+ sk->ack_backlog = 0;
+ sk->state = TCP_LISTEN;
+ }
+ return(0);
+}
+
+/*
+ * Default callbacks for user INET sockets. These just wake up
+ * the user owning the socket.
+ */
+
+static void def_callback1(struct sock *sk)
+{
+ if(!sk->dead)
+ wake_up_interruptible(sk->sleep);
+}
+
+static void def_callback2(struct sock *sk,int len)
+{
+ if(!sk->dead)
+ {
+ wake_up_interruptible(sk->sleep);
+ sock_wake_async(sk->socket);
+ }
+}
+
+
+/*
+ * Create an inet socket.
+ *
+ * FIXME: Gcc would generate much better code if we set the parameters
+ * up in in-memory structure order. Gcc68K even more so
+ */
+
+static int inet_create(struct socket *sock, int protocol)
+{
+ struct sock *sk;
+ struct proto *prot;
+ int err;
+
+ sk = (struct sock *) kmalloc(sizeof(*sk), GFP_KERNEL);
+ if (sk == NULL)
+ return(-ENOBUFS);
+ sk->num = 0;
+ sk->reuse = 0;
+ switch(sock->type)
+ {
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ if (protocol && protocol != IPPROTO_TCP)
+ {
+ kfree_s((void *)sk, sizeof(*sk));
+ return(-EPROTONOSUPPORT);
+ }
+ protocol = IPPROTO_TCP;
+ sk->no_check = TCP_NO_CHECK;
+ prot = &tcp_prot;
+ break;
+
+ case SOCK_DGRAM:
+ if (protocol && protocol != IPPROTO_UDP)
+ {
+ kfree_s((void *)sk, sizeof(*sk));
+ return(-EPROTONOSUPPORT);
+ }
+ protocol = IPPROTO_UDP;
+ sk->no_check = UDP_NO_CHECK;
+ prot=&udp_prot;
+ break;
+
+ case SOCK_RAW:
+ if (!suser())
+ {
+ kfree_s((void *)sk, sizeof(*sk));
+ return(-EPERM);
+ }
+ if (!protocol)
+ {
+ kfree_s((void *)sk, sizeof(*sk));
+ return(-EPROTONOSUPPORT);
+ }
+ prot = &raw_prot;
+ sk->reuse = 1;
+ sk->no_check = 0; /*
+ * Doesn't matter no checksum is
+ * performed anyway.
+ */
+ sk->num = protocol;
+ break;
+
+ case SOCK_PACKET:
+ if (!suser())
+ {
+ kfree_s((void *)sk, sizeof(*sk));
+ return(-EPERM);
+ }
+ if (!protocol)
+ {
+ kfree_s((void *)sk, sizeof(*sk));
+ return(-EPROTONOSUPPORT);
+ }
+ prot = &packet_prot;
+ sk->reuse = 1;
+ sk->no_check = 0; /* Doesn't matter no checksum is
+ * performed anyway.
+ */
+ sk->num = protocol;
+ break;
+
+ default:
+ kfree_s((void *)sk, sizeof(*sk));
+ return(-ESOCKTNOSUPPORT);
+ }
+ sk->socket = sock;
+#ifdef CONFIG_TCP_NAGLE_OFF
+ sk->nonagle = 1;
+#else
+ sk->nonagle = 0;
+#endif
+ sk->type = sock->type;
+ sk->stamp.tv_sec=0;
+ sk->protocol = protocol;
+ sk->wmem_alloc = 0;
+ sk->rmem_alloc = 0;
+ sk->sndbuf = SK_WMEM_MAX;
+ sk->rcvbuf = SK_RMEM_MAX;
+ sk->pair = NULL;
+ sk->opt = NULL;
+ sk->write_seq = 0;
+ sk->acked_seq = 0;
+ sk->copied_seq = 0;
+ sk->fin_seq = 0;
+ sk->urg_seq = 0;
+ sk->urg_data = 0;
+ sk->proc = 0;
+ sk->rtt = 0; /*TCP_WRITE_TIME << 3;*/
+ sk->rto = TCP_TIMEOUT_INIT; /*TCP_WRITE_TIME*/
+ sk->mdev = 0;
+ sk->backoff = 0;
+ sk->packets_out = 0;
+ sk->cong_window = 1; /* start with only sending one packet at a time. */
+ sk->cong_count = 0;
+ sk->ssthresh = 0;
+ sk->max_window = 0;
+ sk->urginline = 0;
+ sk->intr = 0;
+ sk->linger = 0;
+ sk->destroy = 0;
+ sk->priority = 1;
+ sk->shutdown = 0;
+ sk->keepopen = 0;
+ sk->zapped = 0;
+ sk->done = 0;
+ sk->ack_backlog = 0;
+ sk->window = 0;
+ sk->bytes_rcv = 0;
+ sk->state = TCP_CLOSE;
+ sk->dead = 0;
+ sk->ack_timed = 0;
+ sk->partial = NULL;
+ sk->user_mss = 0;
+ sk->debug = 0;
+
+ /* this is how many unacked bytes we will accept for this socket. */
+ sk->max_unacked = 2048; /* needs to be at most 2 full packets. */
+
+ /* how many packets we should send before forcing an ack.
+ if this is set to zero it is the same as sk->delay_acks = 0 */
+ sk->max_ack_backlog = 0;
+ sk->inuse = 0;
+ sk->delay_acks = 0;
+ skb_queue_head_init(&sk->write_queue);
+ skb_queue_head_init(&sk->receive_queue);
+ sk->mtu = 576;
+ sk->prot = prot;
+ sk->sleep = sock->wait;
+ sk->daddr = 0;
+ sk->saddr = 0 /* ip_my_addr() */;
+ sk->err = 0;
+ sk->next = NULL;
+ sk->pair = NULL;
+ sk->send_tail = NULL;
+ sk->send_head = NULL;
+ sk->timeout = 0;
+ sk->broadcast = 0;
+ sk->localroute = 0;
+ init_timer(&sk->timer);
+ sk->timer.data = (unsigned long)sk;
+ sk->timer.function = &net_timer;
+ skb_queue_head_init(&sk->back_log);
+ sk->blog = 0;
+ sock->data =(void *) sk;
+ sk->dummy_th.doff = sizeof(sk->dummy_th)/4;
+ sk->dummy_th.res1=0;
+ sk->dummy_th.res2=0;
+ sk->dummy_th.urg_ptr = 0;
+ sk->dummy_th.fin = 0;
+ sk->dummy_th.syn = 0;
+ sk->dummy_th.rst = 0;
+ sk->dummy_th.psh = 0;
+ sk->dummy_th.ack = 0;
+ sk->dummy_th.urg = 0;
+ sk->dummy_th.dest = 0;
+ sk->ip_tos=0;
+ sk->ip_ttl=64;
+
+ sk->state_change = def_callback1;
+ sk->data_ready = def_callback2;
+ sk->write_space = def_callback1;
+ sk->error_report = def_callback1;
+
+ if (sk->num)
+ {
+ /*
+ * It assumes that any protocol which allows
+ * the user to assign a number at socket
+ * creation time automatically
+ * shares.
+ */
+ put_sock(sk->num, sk);
+ sk->dummy_th.source = ntohs(sk->num);
+ }
+
+ if (sk->prot->init)
+ {
+ err = sk->prot->init(sk);
+ if (err != 0)
+ {
+ destroy_sock(sk);
+ return(err);
+ }
+ }
+ return(0);
+}
+
+
+/*
+ * Duplicate a socket.
+ */
+
+static int inet_dup(struct socket *newsock, struct socket *oldsock)
+{
+ return(inet_create(newsock,((struct sock *)(oldsock->data))->protocol));
+}
+
+
+/*
+ * The peer socket should always be NULL (or else). When we call this
+ * function we are destroying the object and from then on nobody
+ * should refer to it.
+ */
+
+static int inet_release(struct socket *sock, struct socket *peer)
+{
+ struct sock *sk = (struct sock *) sock->data;
+ if (sk == NULL)
+ return(0);
+
+ sk->state_change(sk);
+
+ /* Start closing the connection. This may take a while. */
+
+ /*
+ * If linger is set, we don't return until the close
+ * is complete. Otherwise we return immediately. The
+ * actual closing is done the same either way.
+ */
+
+ if (sk->linger == 0)
+ {
+ sk->prot->close(sk,0);
+ sk->dead = 1;
+ }
+ else
+ {
+ sk->prot->close(sk, 0);
+ cli();
+ if (sk->lingertime)
+ current->timeout = jiffies + HZ*sk->lingertime;
+ while(sk->state != TCP_CLOSE && current->timeout>0)
+ {
+ interruptible_sleep_on(sk->sleep);
+ if (current->signal & ~current->blocked)
+ {
+ break;
+#if 0
+ /* not working now - closes can't be restarted */
+ sti();
+ current->timeout=0;
+ return(-ERESTARTSYS);
+#endif
+ }
+ }
+ current->timeout=0;
+ sti();
+ sk->dead = 1;
+ }
+ sk->inuse = 1;
+
+ /* This will destroy it. */
+ release_sock(sk);
+ sock->data = NULL;
+ sk->socket = NULL;
+ return(0);
+}
+
+
+/* this needs to be changed to disallow
+ the rebinding of sockets. What error
+ should it return? */
+
+static int inet_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len)
+{
+ struct sockaddr_in *addr=(struct sockaddr_in *)uaddr;
+ struct sock *sk=(struct sock *)sock->data, *sk2;
+ unsigned short snum;
+ int chk_addr_ret;
+
+ /* check this error. */
+ if (sk->state != TCP_CLOSE)
+ return(-EIO);
+ if (sk->num != 0)
+ return(-EINVAL);
+
+ if(addr_len<sizeof(struct sockaddr_in))
+ return -EINVAL;
+
+ snum = ntohs(addr->sin_port);
+
+ /*
+ * We can't just leave the socket bound wherever it is, it might
+ * be bound to a privileged port. However, since there seems to
+ * be a bug here, we will leave it if the port is not privileged.
+ */
+ if (snum == 0)
+ {
+ snum = get_new_socknum(sk->prot, 0);
+ }
+ if (snum < PROT_SOCK && !suser())
+ return(-EACCES);
+
+ chk_addr_ret = ip_chk_addr(addr->sin_addr.s_addr);
+ if (addr->sin_addr.s_addr != 0 && chk_addr_ret != IS_MYADDR)
+ return(-EADDRNOTAVAIL); /* Source address MUST be ours! */
+
+ if (chk_addr_ret || addr->sin_addr.s_addr == 0)
+ sk->saddr = addr->sin_addr.s_addr;
+
+ /* Make sure we are allowed to bind here. */
+ cli();
+outside_loop:
+ for(sk2 = sk->prot->sock_array[snum & (SOCK_ARRAY_SIZE -1)];
+ sk2 != NULL; sk2 = sk2->next)
+ {
+/* should be below! */
+ if (sk2->num != snum) continue;
+ if (sk2->dead)
+ {
+ destroy_sock(sk2);
+ goto outside_loop;
+ }
+ if (!sk->reuse)
+ {
+ sti();
+ return(-EADDRINUSE);
+ }
+
+ if (sk2->num != snum)
+ continue; /* more than one */
+ if (sk2->saddr != sk->saddr)
+ continue; /* socket per slot ! -FB */
+ if (!sk2->reuse)
+ {
+ sti();
+ return(-EADDRINUSE);
+ }
+ }
+ sti();
+
+ remove_sock(sk);
+ put_sock(snum, sk);
+ sk->dummy_th.source = ntohs(sk->num);
+ sk->daddr = 0;
+ sk->dummy_th.dest = 0;
+ return(0);
+}
+
+/*
+ * Handle sk->err properly. The cli/sti matter.
+ */
+
+static int inet_error(struct sock *sk)
+{
+ unsigned long flags;
+ int err;
+ save_flags(flags);
+ cli();
+ err=sk->err;
+ sk->err=0;
+ sti();
+ return -err;
+}
+
+/*
+ * Connect to a remote host. There is regrettably still a little
+ * TCP 'magic' in here.
+ */
+
+static int inet_connect(struct socket *sock, struct sockaddr * uaddr,
+ int addr_len, int flags)
+{
+ struct sock *sk=(struct sock *)sock->data;
+ int err;
+ sock->conn = NULL;
+
+ if (sock->state == SS_CONNECTING && tcp_connected(sk->state))
+ {
+ sock->state = SS_CONNECTED;
+ /* Connection completing after a connect/EINPROGRESS/select/connect */
+ return 0; /* Rock and roll */
+ }
+
+ if (sock->state == SS_CONNECTING && sk->protocol == IPPROTO_TCP && (flags & O_NONBLOCK))
+ return -EALREADY; /* Connecting is currently in progress */
+
+ if (sock->state != SS_CONNECTING)
+ {
+ /* We may need to bind the socket. */
+ if(inet_autobind(sk)!=0)
+ return(-EAGAIN);
+ if (sk->prot->connect == NULL)
+ return(-EOPNOTSUPP);
+ err = sk->prot->connect(sk, (struct sockaddr_in *)uaddr, addr_len);
+ if (err < 0)
+ return(err);
+ sock->state = SS_CONNECTING;
+ }
+
+ if (sk->state > TCP_FIN_WAIT2 && sock->state==SS_CONNECTING)
+ {
+ sock->state=SS_UNCONNECTED;
+ cli();
+ err=sk->err;
+ sk->err=0;
+ sti();
+ return -err;
+ }
+
+ if (sk->state != TCP_ESTABLISHED &&(flags & O_NONBLOCK))
+ return(-EINPROGRESS);
+
+ cli(); /* avoid the race condition */
+ while(sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
+ {
+ interruptible_sleep_on(sk->sleep);
+ if (current->signal & ~current->blocked)
+ {
+ sti();
+ return(-ERESTARTSYS);
+ }
+ /* This fixes a nasty in the tcp/ip code. There is a hideous hassle with
+ icmp error packets wanting to close a tcp or udp socket. */
+ if(sk->err && sk->protocol == IPPROTO_TCP)
+ {
+ sti();
+ sock->state = SS_UNCONNECTED;
+ err = -sk->err;
+ sk->err=0;
+ return err; /* set by tcp_err() */
+ }
+ }
+ sti();
+ sock->state = SS_CONNECTED;
+
+ if (sk->state != TCP_ESTABLISHED && sk->err)
+ {
+ sock->state = SS_UNCONNECTED;
+ err=sk->err;
+ sk->err=0;
+ return(-err);
+ }
+ return(0);
+}
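
The "connect/EINPROGRESS/select/connect" comment above describes the
user-level pattern this routine is written to support. A rough user-space
sketch of that pattern, with error handling trimmed and a made-up helper
name; it assumes 'addr' has already been filled in.

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <netinet/in.h>
#include <fcntl.h>
#include <errno.h>

static int demo_nonblock_connect(int fd, struct sockaddr_in *addr)
{
	fd_set wfds;

	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
	if (connect(fd, (struct sockaddr *) addr, sizeof(*addr)) == 0)
		return 0;			/* connected at once (e.g. loopback) */
	if (errno != EINPROGRESS)
		return -1;			/* immediate hard failure */

	FD_ZERO(&wfds);
	FD_SET(fd, &wfds);
	if (select(fd + 1, NULL, &wfds, NULL, NULL) <= 0)
		return -1;			/* interrupted or other error */

	/* a second connect() completes the bookkeeping: 0 on success,
	   -1 with the connection's saved error code otherwise */
	if (connect(fd, (struct sockaddr *) addr, sizeof(*addr)) == 0)
		return 0;
	return (errno == EISCONN) ? 0 : -1;
}
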
+
+
+static int inet_socketpair(struct socket *sock1, struct socket *sock2)
+{
+ return(-EOPNOTSUPP);
+}
+
+
+/*
+ * FIXME: Get BSD behaviour
+ */
+
+static int inet_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ struct sock *sk1, *sk2;
+ int err;
+
+ sk1 = (struct sock *) sock->data;
+
+ /*
+ * We've been passed an extra socket.
+ * We need to free it up because the tcp module creates
+ * its own when it accepts one.
+ */
+ if (newsock->data)
+ {
+ struct sock *sk=(struct sock *)newsock->data;
+ newsock->data=NULL;
+ sk->dead = 1;
+ destroy_sock(sk);
+ }
+
+ if (sk1->prot->accept == NULL)
+ return(-EOPNOTSUPP);
+
+ /* Restore the state if we have been interrupted, and then returned. */
+ if (sk1->pair != NULL )
+ {
+ sk2 = sk1->pair;
+ sk1->pair = NULL;
+ }
+ else
+ {
+ sk2 = sk1->prot->accept(sk1,flags);
+ if (sk2 == NULL)
+ {
+ if (sk1->err <= 0)
+ printk("Warning sock.c:sk1->err <= 0. Returning non-error.\n");
+ err=sk1->err;
+ sk1->err=0;
+ return(-err);
+ }
+ }
+ newsock->data = (void *)sk2;
+ sk2->sleep = newsock->wait;
+ sk2->socket = newsock;
+ newsock->conn = NULL;
+ if (flags & O_NONBLOCK)
+ return(0);
+
+ cli(); /* avoid the race. */
+ while(sk2->state == TCP_SYN_RECV)
+ {
+ interruptible_sleep_on(sk2->sleep);
+ if (current->signal & ~current->blocked)
+ {
+ sti();
+ sk1->pair = sk2;
+ sk2->sleep = NULL;
+ sk2->socket=NULL;
+ newsock->data = NULL;
+ return(-ERESTARTSYS);
+ }
+ }
+ sti();
+
+ if (sk2->state != TCP_ESTABLISHED && sk2->err > 0)
+ {
+ err = -sk2->err;
+ sk2->err=0;
+ sk2->dead=1; /* ANK */
+ destroy_sock(sk2);
+ newsock->data = NULL;
+ return(err);
+ }
+ newsock->state = SS_CONNECTED;
+ return(0);
+}
+
+
+/*
+ * This does both peername and sockname.
+ */
+
+static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ struct sockaddr_in *sin=(struct sockaddr_in *)uaddr;
+ struct sock *sk;
+
+ sin->sin_family = AF_INET;
+ sk = (struct sock *) sock->data;
+ if (peer)
+ {
+ if (!tcp_connected(sk->state))
+ return(-ENOTCONN);
+ sin->sin_port = sk->dummy_th.dest;
+ sin->sin_addr.s_addr = sk->daddr;
+ }
+ else
+ {
+ sin->sin_port = sk->dummy_th.source;
+ if (sk->saddr == 0)
+ sin->sin_addr.s_addr = ip_my_addr();
+ else
+ sin->sin_addr.s_addr = sk->saddr;
+ }
+ *uaddr_len = sizeof(*sin);
+ return(0);
+}
+
+
+/*
+ * The assorted BSD I/O operations
+ */
+
+static int inet_recvfrom(struct socket *sock, void *ubuf, int size, int noblock,
+ unsigned flags, struct sockaddr *sin, int *addr_len )
+{
+ struct sock *sk = (struct sock *) sock->data;
+
+ if (sk->prot->recvfrom == NULL)
+ return(-EOPNOTSUPP);
+ if(sk->err)
+ return inet_error(sk);
+ /* We may need to bind the socket. */
+ if(inet_autobind(sk)!=0)
+ return(-EAGAIN);
+ return(sk->prot->recvfrom(sk, (unsigned char *) ubuf, size, noblock, flags,
+ (struct sockaddr_in*)sin, addr_len));
+}
+
+
+static int inet_recv(struct socket *sock, void *ubuf, int size, int noblock,
+ unsigned flags)
+{
+ /* BSD explicitly states these are the same - so we do it this way to be sure */
+ return inet_recvfrom(sock,ubuf,size,noblock,flags,NULL,NULL);
+}
+
+static int inet_read(struct socket *sock, char *ubuf, int size, int noblock)
+{
+ struct sock *sk = (struct sock *) sock->data;
+
+ if(sk->err)
+ return inet_error(sk);
+ /* We may need to bind the socket. */
+ if(inet_autobind(sk))
+ return(-EAGAIN);
+ return(sk->prot->read(sk, (unsigned char *) ubuf, size, noblock, 0));
+}
+
+static int inet_send(struct socket *sock, void *ubuf, int size, int noblock,
+ unsigned flags)
+{
+ struct sock *sk = (struct sock *) sock->data;
+ if (sk->shutdown & SEND_SHUTDOWN)
+ {
+ send_sig(SIGPIPE, current, 1);
+ return(-EPIPE);
+ }
+ if(sk->err)
+ return inet_error(sk);
+ /* We may need to bind the socket. */
+ if(inet_autobind(sk)!=0)
+ return(-EAGAIN);
+ return(sk->prot->write(sk, (unsigned char *) ubuf, size, noblock, flags));
+}
+
+static int inet_write(struct socket *sock, char *ubuf, int size, int noblock)
+{
+ return inet_send(sock,ubuf,size,noblock,0);
+}
+
+static int inet_sendto(struct socket *sock, void *ubuf, int size, int noblock,
+ unsigned flags, struct sockaddr *sin, int addr_len)
+{
+ struct sock *sk = (struct sock *) sock->data;
+ if (sk->shutdown & SEND_SHUTDOWN)
+ {
+ send_sig(SIGPIPE, current, 1);
+ return(-EPIPE);
+ }
+ if (sk->prot->sendto == NULL)
+ return(-EOPNOTSUPP);
+ if(sk->err)
+ return inet_error(sk);
+ /* We may need to bind the socket. */
+ if(inet_autobind(sk)!=0)
+ return -EAGAIN;
+ return(sk->prot->sendto(sk, (unsigned char *) ubuf, size, noblock, flags,
+ (struct sockaddr_in *)sin, addr_len));
+}
+
+
+static int inet_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk=(struct sock*)sock->data;
+
+ /*
+ * This should really check to make sure
+ * the socket is a TCP socket. (WHY AC...)
+ */
+ how++; /* maps 0->1, 1->2, 2->3; this has the advantage of making
+ bit 1 mean receive shutdown and bit 2 mean send shutdown */
+ if ((how & ~SHUTDOWN_MASK) || how==0) /* MAXINT->0 */
+ return(-EINVAL);
+ if (sock->state == SS_CONNECTING && sk->state == TCP_ESTABLISHED)
+ sock->state = SS_CONNECTED;
+ if (!tcp_connected(sk->state))
+ return(-ENOTCONN);
+ sk->shutdown |= how;
+ if (sk->prot->shutdown)
+ sk->prot->shutdown(sk, how);
+ return(0);
+}
+
+
+static int inet_select(struct socket *sock, int sel_type, select_table *wait )
+{
+ struct sock *sk=(struct sock *) sock->data;
+ if (sk->prot->select == NULL)
+ {
+ return(0);
+ }
+ return(sk->prot->select(sk, sel_type, wait));
+}
+
+/*
+ * ioctl() calls you can issue on an INET socket. Most of these are
+ * device configuration and stuff and very rarely used. Some ioctls
+ * pass on to the socket itself.
+ *
+ * NOTE: I like the idea of a module for the config stuff. i.e. ifconfig
+ * loads the devconfigure module, lets it do its configuring and unloads it.
+ * There's a good 20K of config code hanging around the kernel.
+ */
+
+static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk=(struct sock *)sock->data;
+ int err;
+
+ switch(cmd)
+ {
+ case FIOSETOWN:
+ case SIOCSPGRP:
+ err=verify_area(VERIFY_READ,(int *)arg,sizeof(long));
+ if(err)
+ return err;
+ sk->proc = get_fs_long((int *) arg);
+ return(0);
+ case FIOGETOWN:
+ case SIOCGPGRP:
+ err=verify_area(VERIFY_WRITE,(void *) arg, sizeof(long));
+ if(err)
+ return err;
+ put_fs_long(sk->proc,(int *)arg);
+ return(0);
+ case SIOCGSTAMP:
+ if(sk->stamp.tv_sec==0)
+ return -ENOENT;
+ err=verify_area(VERIFY_WRITE,(void *)arg,sizeof(struct timeval));
+ if(err)
+ return err;
+ memcpy_tofs((void *)arg,&sk->stamp,sizeof(struct timeval));
+ return 0;
+ case SIOCADDRT: case SIOCADDRTOLD:
+ case SIOCDELRT: case SIOCDELRTOLD:
+ return(ip_rt_ioctl(cmd,(void *) arg));
+ case SIOCDARP:
+ case SIOCGARP:
+ case SIOCSARP:
+ return(arp_ioctl(cmd,(void *) arg));
+#ifdef CONFIG_INET_RARP
+ case SIOCDRARP:
+ case SIOCGRARP:
+ case SIOCSRARP:
+ return(rarp_ioctl(cmd,(void *) arg));
+#endif
+ case SIOCGIFCONF:
+ case SIOCGIFFLAGS:
+ case SIOCSIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ case SIOCGIFMEM:
+ case SIOCSIFMEM:
+ case SIOCGIFMTU:
+ case SIOCSIFMTU:
+ case SIOCSIFLINK:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case OLD_SIOCGIFHWADDR:
+ case SIOCSIFMAP:
+ case SIOCGIFMAP:
+ case SIOCSIFSLAVE:
+ case SIOCGIFSLAVE:
+ return(dev_ioctl(cmd,(void *) arg));
+
+ default:
+ if ((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15)))
+ return(dev_ioctl(cmd,(void *) arg));
+
+ if (sk->prot->ioctl==NULL)
+ return(-EINVAL);
+ return(sk->prot->ioctl(sk, cmd, arg));
+ }
+ /*NOTREACHED*/
+ return(0);
+}
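
The SIOCGSTAMP case above copies the last receive timestamp (sk->stamp) out
to user space as a struct timeval. A small user-space sketch of reading it;
the wrapper name is illustrative, and the call fails with ENOENT until at
least one packet has been timestamped.

#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>

/* Fetch the kernel timestamp of the most recently received packet. */
static int demo_last_rx_time(int fd, struct timeval *tv)
{
	return ioctl(fd, SIOCGSTAMP, tv);	/* -1 with errno ENOENT if none yet */
}
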
+
+/*
+ * This routine must find a socket given a TCP or UDP header.
+ * Everything is assumed to be in net order.
+ *
+ * We give priority to more closely bound ports: if some socket
+ * is bound to a particular foreign address, it will get the packet
+ * rather than somebody listening to any address..
+ */
+
+struct sock *get_sock(struct proto *prot, unsigned short num,
+ unsigned long raddr,
+ unsigned short rnum, unsigned long laddr)
+{
+ struct sock *s;
+ struct sock *result = NULL;
+ int badness = -1;
+ unsigned short hnum;
+
+ hnum = ntohs(num);
+
+ /*
+ * SOCK_ARRAY_SIZE must be a power of two. This will work better
+ * than a prime unless 3 or more sockets end up using the same
+ * array entry. This should not be a problem because most
+ * well known sockets don't overlap that much, and for
+ * the other ones, we can just be careful about picking our
+ * socket number when we choose an arbitrary one.
+ */
+
+ for(s = prot->sock_array[hnum & (SOCK_ARRAY_SIZE - 1)];
+ s != NULL; s = s->next)
+ {
+ int score = 0;
+
+ if (s->num != hnum)
+ continue;
+
+ if(s->dead && (s->state == TCP_CLOSE))
+ continue;
+ /* local address matches? */
+ if (s->saddr) {
+ if (s->saddr != laddr)
+ continue;
+ score++;
+ }
+ /* remote address matches? */
+ if (s->daddr) {
+ if (s->daddr != raddr)
+ continue;
+ score++;
+ }
+ /* remote port matches? */
+ if (s->dummy_th.dest) {
+ if (s->dummy_th.dest != rnum)
+ continue;
+ score++;
+ }
+ /* perfect match? */
+ if (score == 3)
+ return s;
+ /* no, check if this is the best so far.. */
+ if (score <= badness)
+ continue;
+ result = s;
+ badness = score;
+ }
+ return result;
+}
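
get_sock() prefers the most specifically bound socket: each bound field that
matches the packet scores a point, an unbound (zero) field acts as a wildcard,
and a mismatch on a bound field disqualifies the socket outright. A
stand-alone sketch of that scoring rule; parameter names are illustrative.

/* Returns -1 if the socket cannot receive this packet, otherwise a
   specificity score 0..3; the caller keeps the highest-scoring socket. */
static int demo_match_score(unsigned long bound_saddr, unsigned long bound_daddr,
			    unsigned short bound_dport,
			    unsigned long pkt_laddr, unsigned long pkt_raddr,
			    unsigned short pkt_rport)
{
	int score = 0;

	if (bound_saddr) {			/* bound to a local address? */
		if (bound_saddr != pkt_laddr)
			return -1;
		score++;
	}
	if (bound_daddr) {			/* bound to a remote address? */
		if (bound_daddr != pkt_raddr)
			return -1;
		score++;
	}
	if (bound_dport) {			/* bound to a remote port? */
		if (bound_dport != pkt_rport)
			return -1;
		score++;
	}
	return score;				/* 3 is a fully specified match */
}
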
+
+static struct proto_ops inet_proto_ops = {
+ AF_INET,
+
+ inet_create,
+ inet_dup,
+ inet_release,
+ inet_bind,
+ inet_connect,
+ inet_socketpair,
+ inet_accept,
+ inet_getname,
+ inet_read,
+ inet_write,
+ inet_select,
+ inet_ioctl,
+ inet_listen,
+ inet_send,
+ inet_recv,
+ inet_sendto,
+ inet_recvfrom,
+ inet_shutdown,
+ inet_setsockopt,
+ inet_getsockopt,
+ inet_fcntl,
+};
+
+extern unsigned long seq_offset;
+
+/*
+ * Called by socket.c on kernel startup.
+ */
+
+void inet_proto_init(struct net_proto *pro)
+{
+ struct inet_protocol *p;
+ int i;
+
+
+ printk("Swansea University Computer Society TCP/IP for NET3.017\n");
+
+ /*
+ * Tell SOCKET that we are alive...
+ */
+
+ (void) sock_register(inet_proto_ops.family, &inet_proto_ops);
+
+ seq_offset = CURRENT_TIME*250;
+
+ /*
+ * Add all the protocols.
+ */
+
+ for(i = 0; i < SOCK_ARRAY_SIZE; i++)
+ {
+ tcp_prot.sock_array[i] = NULL;
+ udp_prot.sock_array[i] = NULL;
+ raw_prot.sock_array[i] = NULL;
+ }
+
+ printk("IP Protocols: ");
+ for(p = inet_protocol_base; p != NULL;)
+ {
+ struct inet_protocol *tmp = (struct inet_protocol *) p->next;
+ inet_add_protocol(p);
+ printk("%s%s",p->name,tmp?", ":"\n");
+ p = tmp;
+ }
+ /*
+ * Set the ARP module up
+ */
+ arp_init();
+ /*
+ * Set the IP module up
+ */
+ ip_init();
+}
+
diff --git a/net/inet/arp.c b/net/inet/arp.c
new file mode 100644
index 000000000..67174bb7b
--- /dev/null
+++ b/net/inet/arp.c
@@ -0,0 +1,1228 @@
+/* linux/net/inet/arp.c
+ *
+ * Copyright (C) 1994 by Florian La Roche
+ *
+ * This module implements the Address Resolution Protocol ARP (RFC 826),
+ * which is used to convert IP addresses (or in the future maybe other
+ * high-level addresses) into a low-level hardware address (like an Ethernet
+ * address).
+ *
+ * FIXME:
+ * Experiment with better retransmit timers
+ * Clean up the timer deletions
+ * If you create a proxy entry set your interface address to the address
+ * and then delete it, proxies may get out of sync with reality - check this
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Fixes:
+ * Alan Cox : Removed the ethernet assumptions in Florian's code
+ * Alan Cox : Fixed some small errors in the ARP logic
+ * Alan Cox : Allow >4K in /proc
+ * Alan Cox : Make ARP add its own protocol entry
+ *
+ * Ross Martin : Rewrote arp_rcv() and arp_get_info()
+ * Stephen Henson : Add AX25 support to arp_get_info()
+ * Alan Cox : Drop data when a device is downed.
+ * Alan Cox : Use init_timer().
+ * Alan Cox : Double lock fixes.
+ * Martin Seine : Move the arphdr structure
+ * to if_arp.h for compatibility
+ * with BSD based programs.
+ * Andrew Tridgell : Added ARP netmask code and
+ * re-arranged proxy handling
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/config.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <stdarg.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "ip.h"
+#include "route.h"
+#include "protocol.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "arp.h"
+#ifdef CONFIG_AX25
+#include "ax25.h"
+#endif
+
+
+/*
+ * This structure defines the ARP mapping cache. As long as we make changes
+ * in this structure, we keep interrupts off. But normally we can copy the
+ * hardware address and the device pointer in a local variable and then make
+ * any "long calls" to send a packet out.
+ */
+
+struct arp_table
+{
+ struct arp_table *next; /* Linked entry list */
+ unsigned long last_used; /* For expiry */
+ unsigned int flags; /* Control status */
+ unsigned long ip; /* ip address of entry */
+ unsigned long mask; /* netmask - used for generalised proxy arps (tridge) */
+ unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
+ unsigned char hlen; /* Length of hardware address */
+ unsigned short htype; /* Type of hardware in use */
+ struct device *dev; /* Device the entry is tied to */
+
+ /*
+ * The following entries are only used for unresolved hw addresses.
+ */
+
+ struct timer_list timer; /* expire timer */
+ int retries; /* remaining retries */
+ struct sk_buff_head skb; /* list of queued packets */
+};
+
+
+/*
+ * Configurable Parameters (don't touch unless you know what you are doing)
+ */
+
+/*
+ * If an arp request is sent, ARP_RES_TIME is the timeout value until the
+ * next request is sent.
+ */
+
+#define ARP_RES_TIME (250*(HZ/10))
+
+/*
+ * The number of times an arp request is sent, until the host is
+ * considered unreachable.
+ */
+
+#define ARP_MAX_TRIES 3
+
+/*
+ * After that time, an unused entry is deleted from the arp table.
+ */
+
+#define ARP_TIMEOUT (600*HZ)
+
+/*
+ * How often the function 'arp_check_expire' is called.
+ * An entry is invalidated in the time between ARP_TIMEOUT and
+ * (ARP_TIMEOUT+ARP_CHECK_INTERVAL).
+ */
+
+#define ARP_CHECK_INTERVAL (60 * HZ)
+
+/* Forward declarations. */
+static void arp_check_expire (unsigned long);
+static struct arp_table *arp_lookup(unsigned long paddr, int exact);
+
+
+static struct timer_list arp_timer =
+ { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
+
+/*
+ * The default arp netmask is just 255.255.255.255 which means it's
+ * a single machine entry. Only proxy entries can have other netmasks
+ *
+*/
+
+#define DEF_ARP_NETMASK (~0)
+
+
+/*
+ * The size of the hash table. Must be a power of two.
+ * Maybe we should remove hashing in the future for arp and concentrate
+ * on Patrick Schaaf's Host-Cache-Lookup...
+ */
+
+
+#define ARP_TABLE_SIZE 16
+
+/* The ugly +1 here is to cater for proxy entries. They are put in their
+ own list for efficiency of lookup. If you don't want to find a proxy
+ entry then don't look in the last entry, otherwise do
+*/
+
+#define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
+
+struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
+{
+ NULL,
+};
+
+
+/*
+ * The last bits in the IP address are used for the cache lookup.
+ * A special entry is used for proxy arp entries
+ */
+
+#define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
+#define PROXY_HASH ARP_TABLE_SIZE
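
Worked example of HASH(): the stored address is in network byte order, so
htonl() leaves the final octet in the low byte on any host, and the mask then
keeps its low four bits. Proxy entries skip this and always live in the extra
PROXY_HASH bucket.

/* e.g. paddr holding 192.168.1.77 (network order):
 *   htonl(paddr) = 0xC0A8014D, and 0x4D & 15 = 13  ->  arp_tables[13]  */
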
+
+/*
+ * Check if there are too old entries and remove them. If the ATF_PERM
+ * flag is set, they are always left in the arp cache (permanent entry).
+ * Note: Only fully resolved entries, which don't have any packets in
+ * the queue, can be deleted, since ARP_TIMEOUT is much greater than
+ * ARP_MAX_TRIES*ARP_RES_TIME.
+ */
+
+static void arp_check_expire(unsigned long dummy)
+{
+ int i;
+ unsigned long now = jiffies;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
+ {
+ struct arp_table *entry;
+ struct arp_table **pentry = &arp_tables[i];
+
+ while ((entry = *pentry) != NULL)
+ {
+ if ((now - entry->last_used) > ARP_TIMEOUT
+ && !(entry->flags & ATF_PERM))
+ {
+ *pentry = entry->next; /* remove from list */
+ del_timer(&entry->timer); /* Paranoia */
+ kfree_s(entry, sizeof(struct arp_table));
+ }
+ else
+ pentry = &entry->next; /* go to next entry */
+ }
+ }
+ restore_flags(flags);
+
+ /*
+ * Set the timer again.
+ */
+
+ del_timer(&arp_timer);
+ arp_timer.expires = ARP_CHECK_INTERVAL;
+ add_timer(&arp_timer);
+}
+
+
+/*
+ * Release all linked skb's and the memory for this entry.
+ */
+
+static void arp_release_entry(struct arp_table *entry)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ /* Release the list of `skb' pointers. */
+ while ((skb = skb_dequeue(&entry->skb)) != NULL)
+ {
+ skb_device_lock(skb);
+ restore_flags(flags);
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ restore_flags(flags);
+ del_timer(&entry->timer);
+ kfree_s(entry, sizeof(struct arp_table));
+ return;
+}
+
+/*
+ * Purge a device from the ARP queue
+ */
+
+void arp_device_down(struct device *dev)
+{
+ int i;
+ unsigned long flags;
+
+ /*
+ * This is a bit OTT - maybe we need some arp semaphores instead.
+ */
+ save_flags(flags);
+ cli();
+ for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
+ {
+ struct arp_table *entry;
+ struct arp_table **pentry = &arp_tables[i];
+
+ while ((entry = *pentry) != NULL)
+ {
+ if(entry->dev==dev)
+ {
+ *pentry = entry->next; /* remove from list */
+ del_timer(&entry->timer); /* Paranoia */
+ kfree_s(entry, sizeof(struct arp_table));
+ }
+ else
+ pentry = &entry->next; /* go to next entry */
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Create and send an arp packet. If (dest_hw == NULL), we create a broadcast
+ * message.
+ */
+
+void arp_send(int type, int ptype, unsigned long dest_ip,
+ struct device *dev, unsigned long src_ip,
+ unsigned char *dest_hw, unsigned char *src_hw)
+{
+ struct sk_buff *skb;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+
+ /*
+ * No arp on this interface.
+ */
+
+ if(dev->flags&IFF_NOARP)
+ return;
+
+ /*
+ * Allocate a buffer
+ */
+
+ skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
+ + dev->hard_header_len, GFP_ATOMIC);
+ if (skb == NULL)
+ {
+ printk("ARP: no memory to send an arp packet\n");
+ return;
+ }
+ skb->len = sizeof(struct arphdr) + dev->hard_header_len + 2*(dev->addr_len+4);
+ skb->arp = 1;
+ skb->dev = dev;
+ skb->free = 1;
+
+ /*
+ * Fill the device header for the ARP frame
+ */
+
+ dev->hard_header(skb->data,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len,skb);
+
+ /* Fill out the arp protocol part. */
+ arp = (struct arphdr *) (skb->data + dev->hard_header_len);
+ arp->ar_hrd = htons(dev->type);
+#ifdef CONFIG_AX25
+ arp->ar_pro = (dev->type != ARPHRD_AX25)? htons(ETH_P_IP) : htons(AX25_P_IP);
+#else
+ arp->ar_pro = htons(ETH_P_IP);
+#endif
+ arp->ar_hln = dev->addr_len;
+ arp->ar_pln = 4;
+ arp->ar_op = htons(type);
+
+ arp_ptr=(unsigned char *)(arp+1);
+
+ memcpy(arp_ptr, src_hw, dev->addr_len);
+ arp_ptr+=dev->addr_len;
+ memcpy(arp_ptr, &src_ip,4);
+ arp_ptr+=4;
+ if (dest_hw != NULL)
+ memcpy(arp_ptr, dest_hw, dev->addr_len);
+ else
+ memset(arp_ptr, 0, dev->addr_len);
+ arp_ptr+=dev->addr_len;
+ memcpy(arp_ptr, &dest_ip, 4);
+
+ dev_queue_xmit(skb, dev, 0);
+}
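
For reference, the sequence of memcpy() calls above lays out the ARP body
after the arphdr in the order sender hardware address, sender IP, target
hardware address, target IP. Shown here as a struct for the Ethernet/IPv4
case (ar_hln = 6, ar_pln = 4); the struct and its field names are only an
illustration, the code builds the same bytes by pointer arithmetic.

struct demo_ether_arp_body {
	unsigned char sender_hw[6];	/* src_hw (usually dev->dev_addr) */
	unsigned char sender_ip[4];	/* src_ip */
	unsigned char target_hw[6];	/* dest_hw, zeroed in a broadcast request */
	unsigned char target_ip[4];	/* dest_ip */
};
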
+
+
+/*
+ * This function is called, if an entry is not resolved in ARP_RES_TIME.
+ * Either resend a request, or give it up and free the entry.
+ */
+
+static void arp_expire_request (unsigned long arg)
+{
+ struct arp_table *entry = (struct arp_table *) arg;
+ struct arp_table **pentry;
+ unsigned long hash;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ /*
+ * Since all timeouts are handled with interrupts enabled, there is a
+ * small chance that this entry has just been resolved by an incoming
+ * packet. This is the only race condition, but it is handled...
+ */
+
+ if (entry->flags & ATF_COM)
+ {
+ restore_flags(flags);
+ return;
+ }
+
+ if (--entry->retries > 0)
+ {
+ unsigned long ip = entry->ip;
+ struct device *dev = entry->dev;
+
+ /* Set new timer. */
+ del_timer(&entry->timer);
+ entry->timer.expires = ARP_RES_TIME;
+ add_timer(&entry->timer);
+ restore_flags(flags);
+ arp_send(ARPOP_REQUEST, ETH_P_ARP, ip, dev, dev->pa_addr,
+ NULL, dev->dev_addr);
+ return;
+ }
+
+ /*
+ * Arp request timed out. Delete entry and all waiting packets.
+ * If we give each entry a pointer to itself, we don't have to
+ * loop through everything again. Maybe hash is good enough, but
+ * I will look at it later.
+ */
+
+ hash = HASH(entry->ip);
+
+ /* proxy entries shouldn't really time out so this is really
+ only here for completeness
+ */
+ if (entry->flags & ATF_PUBL)
+ pentry = &arp_tables[PROXY_HASH];
+ else
+ pentry = &arp_tables[hash];
+ while (*pentry != NULL)
+ {
+ if (*pentry == entry)
+ {
+ *pentry = entry->next; /* delete from linked list */
+ del_timer(&entry->timer);
+ restore_flags(flags);
+ arp_release_entry(entry);
+ return;
+ }
+ pentry = &(*pentry)->next;
+ }
+ restore_flags(flags);
+ printk("Possible ARP queue corruption.\n");
+ /*
+ * We should never arrive here.
+ */
+}
+
+
+/*
+ * This will try to retransmit everything on the queue.
+ */
+
+static void arp_send_q(struct arp_table *entry, unsigned char *hw_dest)
+{
+ struct sk_buff *skb;
+
+ unsigned long flags;
+
+ /*
+ * Empty the entire queue, building its data up ready to send
+ */
+
+ if(!(entry->flags&ATF_COM))
+ {
+ printk("arp_send_q: incomplete entry for %s\n",
+ in_ntoa(entry->ip));
+ return;
+ }
+
+ save_flags(flags);
+
+ cli();
+ while((skb = skb_dequeue(&entry->skb)) != NULL)
+ {
+ IS_SKB(skb);
+ skb_device_lock(skb);
+ restore_flags(flags);
+ if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
+ {
+ skb->arp = 1;
+ if(skb->sk==NULL)
+ dev_queue_xmit(skb, skb->dev, 0);
+ else
+ dev_queue_xmit(skb,skb->dev,skb->sk->priority);
+ }
+ else
+ {
+ /* This routine is only ever called when 'entry' is
+ complete. Thus this can't fail. */
+ printk("arp_send_q: The impossible occurred. Please notify Alan.\n");
+ printk("arp_send_q: active entity %s\n",in_ntoa(entry->ip));
+ printk("arp_send_q: failed to find %s\n",in_ntoa(skb->raddr));
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Delete an ARP mapping entry in the cache.
+ */
+
+void arp_destroy(unsigned long ip_addr, int force)
+{
+ int checked_proxies = 0;
+ struct arp_table *entry;
+ struct arp_table **pentry;
+ unsigned long hash = HASH(ip_addr);
+
+ cli();
+ pentry = &arp_tables[hash];
+ if (! *pentry) /* also check proxy entries */
+ pentry = &arp_tables[PROXY_HASH];
+
+ while ((entry = *pentry) != NULL)
+ {
+ if (entry->ip == ip_addr)
+ {
+ if ((entry->flags & ATF_PERM) && !force)
+ return;
+ *pentry = entry->next;
+ del_timer(&entry->timer);
+ sti();
+ arp_release_entry(entry);
+ return;
+ }
+ pentry = &entry->next;
+ if (!checked_proxies && ! *pentry)
+ { /* ugly. we have to make sure we check proxy
+ entries as well */
+ checked_proxies = 1;
+ pentry = &arp_tables[PROXY_HASH];
+ }
+ }
+ sti();
+}
+
+
+/*
+ * Receive an arp request from the device layer. Maybe I'll rewrite it to
+ * use the incoming packet for the reply. The time for the current
+ * "overhead" isn't that high...
+ */
+
+int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
+{
+/*
+ * We shouldn't use this type conversion. Check later.
+ */
+
+ struct arphdr *arp = (struct arphdr *)skb->h.raw;
+ unsigned char *arp_ptr= (unsigned char *)(arp+1);
+ struct arp_table *entry;
+ struct arp_table *proxy_entry;
+ int addr_hint,hlen,htype;
+ unsigned long hash;
+ unsigned char ha[MAX_ADDR_LEN]; /* So we can enable ints again. */
+ long sip,tip;
+ unsigned char *sha,*tha;
+
+/*
+ * The hardware length of the packet should match the hardware length
+ * of the device. Similarly, the hardware types should match. The
+ * device should be ARP-able. Also, if pln is not 4, then the lookup
+ * is not from an IP number. We can't currently handle this, so toss
+ * it.
+ */
+ if (arp->ar_hln != dev->addr_len ||
+ dev->type != ntohs(arp->ar_hrd) ||
+ dev->flags & IFF_NOARP ||
+ arp->ar_pln != 4)
+ {
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+
+/*
+ * Another test.
+ * The logic here is that the protocol being looked up by arp should
+ * match the protocol the device speaks. If it doesn't, there is a
+ * problem, so toss the packet.
+ */
+ switch(dev->type)
+ {
+#ifdef CONFIG_AX25
+ case ARPHRD_AX25:
+ if(arp->ar_pro != htons(AX25_P_IP))
+ {
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+ break;
+#endif
+ case ARPHRD_ETHER:
+ case ARPHRD_ARCNET:
+ if(arp->ar_pro != htons(ETH_P_IP))
+ {
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+ break;
+
+ default:
+ printk("ARP: dev->type mangled!\n");
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+
+/*
+ * Extract fields
+ */
+
+ hlen = dev->addr_len;
+ htype = dev->type;
+
+ sha=arp_ptr;
+ arp_ptr+=hlen;
+ memcpy(&sip,arp_ptr,4);
+ arp_ptr+=4;
+ tha=arp_ptr;
+ arp_ptr+=hlen;
+ memcpy(&tip,arp_ptr,4);
+
+/*
+ * Check for bad requests for 127.0.0.1. If this is one such, delete it.
+ */
+ if(tip == INADDR_LOOPBACK)
+ {
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+
+/*
+ * Process entry. The idea here is we want to send a reply if it is a
+ * request for us or if it is a request for someone else that we hold
+ * a proxy for. We want to add an entry to our cache if it is a reply
+ * to us or if it is a request for our address.
+ * (The assumption for this last is that if someone is requesting our
+ * address, they are probably intending to talk to us, so it saves time
+ * if we cache their address. Their address is also probably not in
+ * our cache, since ours is not in their cache.)
+ *
+ * Putting this another way, we only care about replies if they are to
+ * us, in which case we add them to the cache. For requests, we care
+ * about those for us and those for our proxies. We reply to both,
+ * and in the case of requests for us we add the requester to the arp
+ * cache.
+ */
+
+ addr_hint = ip_chk_addr(tip);
+
+ if(arp->ar_op == htons(ARPOP_REPLY))
+ {
+ if(addr_hint!=IS_MYADDR)
+ {
+/*
+ * Replies to other machines get tossed.
+ */
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+/*
+ * Fall through to code below that adds sender to cache.
+ */
+ }
+ else
+ {
+/*
+ * It is now an arp request
+ */
+ if(addr_hint != IS_MYADDR)
+ {
+/*
+ * To get in here, it is a request for someone else. We need to
+ * check if that someone else is one of our proxies. If it isn't,
+ * we can toss it.
+ */
+ cli();
+ for(proxy_entry=arp_tables[PROXY_HASH];
+ proxy_entry;
+ proxy_entry = proxy_entry->next)
+ {
+ /* we will respond to a proxy arp request
+ if the masked arp table ip matches the masked
+ tip. This allows a single proxy arp table
+ entry to be used on a gateway machine to handle
+ all requests for a whole network, rather than
+ having to use a huge number of proxy arp entries
+ and having to keep them up to date.
+ */
+ if (proxy_entry->htype == htype &&
+ !((proxy_entry->ip^tip)&proxy_entry->mask))
+ break;
+
+ }
+ if (proxy_entry)
+ {
+ memcpy(ha, proxy_entry->ha, hlen);
+ sti();
+ arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha);
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+ else
+ {
+ sti();
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+ }
+ else
+ {
+/*
+ * To get here, it must be an arp request for us. We need to reply.
+ */
+ arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr);
+ }
+ }
+
+
+/*
+ * Now all replies are handled. Next, anything that falls through to here
+ * needs to be added to the arp cache, or have its entry updated if it is
+ * there.
+ */
+
+ hash = HASH(sip);
+ cli();
+ for(entry=arp_tables[hash];entry;entry=entry->next)
+ if(entry->ip==sip && entry->htype==htype)
+ break;
+
+ if(entry)
+ {
+/*
+ * Entry found; update it.
+ */
+ memcpy(entry->ha, sha, hlen);
+ entry->hlen = hlen;
+ entry->last_used = jiffies;
+ if (!(entry->flags & ATF_COM))
+ {
+/*
+ * This entry was incomplete. Delete the retransmit timer
+ * and switch to complete status.
+ */
+ del_timer(&entry->timer);
+ entry->flags |= ATF_COM;
+ sti();
+/*
+ * Send out waiting packets. We might have problems, if someone is
+ * manually removing entries right now -- entry might become invalid
+ * underneath us.
+ */
+ arp_send_q(entry, sha);
+ }
+ else
+ {
+ sti();
+ }
+ }
+ else
+ {
+/*
+ * No entry found. Need to add a new entry to the arp table.
+ */
+ entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
+ if(entry == NULL)
+ {
+ sti();
+ printk("ARP: no memory for new arp entry\n");
+
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+
+ entry->mask = DEF_ARP_NETMASK;
+ entry->ip = sip;
+ entry->hlen = hlen;
+ entry->htype = htype;
+ entry->flags = ATF_COM;
+ init_timer(&entry->timer);
+ memcpy(entry->ha, sha, hlen);
+ entry->last_used = jiffies;
+ entry->dev = skb->dev;
+ skb_queue_head_init(&entry->skb);
+ entry->next = arp_tables[hash];
+ arp_tables[hash] = entry;
+ sti();
+ }
+
+/*
+ * Replies have been sent, and entries have been added. All done.
+ */
+ kfree_skb(skb, FREE_READ);
+ return 0;
+}
+
+
+/*
+ * Find an arp mapping in the cache. If not found, post a request.
+ */
+
+int arp_find(unsigned char *haddr, unsigned long paddr, struct device *dev,
+ unsigned long saddr, struct sk_buff *skb)
+{
+ struct arp_table *entry;
+ unsigned long hash;
+ switch (ip_chk_addr(paddr))
+ {
+ case IS_MYADDR:
+ printk("ARP: arp called for own IP address\n");
+ memcpy(haddr, dev->dev_addr, dev->addr_len);
+ skb->arp = 1;
+ return 0;
+ case IS_BROADCAST:
+ memcpy(haddr, dev->broadcast, dev->addr_len);
+ skb->arp = 1;
+ return 0;
+ }
+
+ hash = HASH(paddr);
+ cli();
+
+ /*
+ * Find an entry
+ */
+ entry = arp_lookup(paddr, 0);
+
+ if (entry != NULL) /* It exists */
+ {
+ if (!(entry->flags & ATF_COM))
+ {
+ /*
+ * A request was already sent, but no reply yet. Thus
+ * queue the packet with the previous attempt
+ */
+
+ if (skb != NULL)
+ {
+ skb_queue_tail(&entry->skb, skb);
+ skb_device_unlock(skb);
+ }
+ sti();
+ return 1;
+ }
+
+ /*
+ * Update the record
+ */
+
+ entry->last_used = jiffies;
+ memcpy(haddr, entry->ha, dev->addr_len);
+ if (skb)
+ skb->arp = 1;
+ sti();
+ return 0;
+ }
+
+ /*
+ * Create a new unresolved entry.
+ */
+
+ entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
+ GFP_ATOMIC);
+ if (entry != NULL)
+ {
+ entry->mask = DEF_ARP_NETMASK;
+ entry->ip = paddr;
+ entry->hlen = dev->addr_len;
+ entry->htype = dev->type;
+ entry->flags = 0;
+ memset(entry->ha, 0, dev->addr_len);
+ entry->dev = dev;
+ entry->last_used = jiffies;
+ init_timer(&entry->timer);
+ entry->timer.function = arp_expire_request;
+ entry->timer.data = (unsigned long)entry;
+ entry->timer.expires = ARP_RES_TIME;
+ entry->next = arp_tables[hash];
+ arp_tables[hash] = entry;
+ add_timer(&entry->timer);
+ entry->retries = ARP_MAX_TRIES;
+ skb_queue_head_init(&entry->skb);
+ if (skb != NULL)
+ {
+ skb_queue_tail(&entry->skb, skb);
+ skb_device_unlock(skb);
+ }
+ }
+ else
+ {
+ if (skb != NULL && skb->free)
+ kfree_skb(skb, FREE_WRITE);
+ }
+ sti();
+
+ /*
+ * If we didn't find an entry, we will try to send an ARP packet.
+ */
+
+ arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
+ dev->dev_addr);
+
+ return 1;
+}
+
+
+/*
+ * Write the contents of the ARP cache to a PROCfs file.
+ */
+
+#define HBUFFERLEN 30
+
+int arp_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+ struct arp_table *entry;
+ char hbuffer[HBUFFERLEN];
+ int i,j,k;
+ const char hexbuf[] = "0123456789ABCDEF";
+
+ size = sprintf(buffer,"IP address HW type Flags HW address Mask\n");
+
+ pos+=size;
+ len+=size;
+
+ cli();
+ for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
+ {
+ for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
+ {
+/*
+ * Convert hardware address to XX:XX:XX:XX ... form.
+ */
+#ifdef CONFIG_AX25
+
+ if(entry->htype==ARPHRD_AX25)
+ strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
+ else {
+#endif
+
+ for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->hlen;j++)
+ {
+ hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
+ hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
+ hbuffer[k++]=':';
+ }
+ hbuffer[--k]=0;
+
+#ifdef CONFIG_AX25
+ }
+#endif
+ size = sprintf(buffer+len,
+ "%-17s0x%-10x0x%-10x%s",
+ in_ntoa(entry->ip),
+ (unsigned int)entry->htype,
+ entry->flags,
+ hbuffer);
+ size += sprintf(buffer+len+size,
+ " %-17s\n",
+ entry->mask==DEF_ARP_NETMASK?
+ "*":in_ntoa(entry->mask));
+
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+ }
+ sti();
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+
+
+/*
+ * This will find an entry in the ARP table by looking at the IP address.
+ * If exact is true then only exact IP matches will be allowed
+ * for proxy entries, otherwise the netmask will be used
+ */
+
+static struct arp_table *arp_lookup(unsigned long paddr, int exact)
+{
+ struct arp_table *entry;
+ unsigned long hash = HASH(paddr);
+
+ for (entry = arp_tables[hash]; entry != NULL; entry = entry->next)
+ if (entry->ip == paddr) break;
+
+ /* it's possibly a proxy entry (with a netmask) */
+ if (!entry)
+ for (entry=arp_tables[PROXY_HASH]; entry != NULL; entry = entry->next)
+ if (exact? (entry->ip==paddr) : !((entry->ip^paddr)&entry->mask))
+ break;
+
+ return entry;
+}
+
+
+/*
+ * Set (create) an ARP cache entry.
+ */
+
+static int arp_req_set(struct arpreq *req)
+{
+ struct arpreq r;
+ struct arp_table *entry;
+ struct sockaddr_in *si;
+ int htype, hlen;
+ unsigned long ip;
+ struct rtable *rt;
+
+ memcpy_fromfs(&r, req, sizeof(r));
+
+ /* We only understand about IP addresses... */
+ if (r.arp_pa.sa_family != AF_INET)
+ return -EPFNOSUPPORT;
+
+ /*
+ * Find out about the hardware type.
+ * We have to be compatible with BSD UNIX, so we have to
+ * assume that a "not set" value (i.e. 0) means Ethernet.
+ */
+
+ switch (r.arp_ha.sa_family) {
+ case ARPHRD_ETHER:
+ htype = ARPHRD_ETHER;
+ hlen = ETH_ALEN;
+ break;
+ case ARPHRD_ARCNET:
+ htype = ARPHRD_ARCNET;
+ hlen = 1; /* length of arcnet addresses */
+ break;
+#ifdef CONFIG_AX25
+ case ARPHRD_AX25:
+ htype = ARPHRD_AX25;
+ hlen = 7;
+ break;
+#endif
+ default:
+ return -EPFNOSUPPORT;
+ }
+
+ si = (struct sockaddr_in *) &r.arp_pa;
+ ip = si->sin_addr.s_addr;
+ if (ip == 0)
+ {
+ printk("ARP: SETARP: requested PA is 0.0.0.0 !\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Is it reachable directly ?
+ */
+
+ rt = ip_rt_route(ip, NULL, NULL);
+ if (rt == NULL)
+ return -ENETUNREACH;
+
+ /*
+ * Is there an existing entry for this address?
+ */
+
+ cli();
+
+ /*
+ * Find the entry
+ */
+ entry = arp_lookup(ip, 1);
+
+ /*
+ * Do we need to create a new entry
+ */
+
+ if (entry == NULL)
+ {
+ unsigned long hash = HASH(ip);
+ if (r.arp_flags & ATF_PUBL)
+ hash = PROXY_HASH;
+
+ entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
+ GFP_ATOMIC);
+ if (entry == NULL)
+ {
+ sti();
+ return -ENOMEM;
+ }
+ entry->ip = ip;
+ entry->hlen = hlen;
+ entry->htype = htype;
+ init_timer(&entry->timer);
+ entry->next = arp_tables[hash];
+ arp_tables[hash] = entry;
+ skb_queue_head_init(&entry->skb);
+ }
+ /*
+ * We now have a pointer to an ARP entry. Update it!
+ */
+
+ memcpy(&entry->ha, &r.arp_ha.sa_data, hlen);
+ entry->last_used = jiffies;
+ entry->flags = r.arp_flags | ATF_COM;
+ if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
+ {
+ si = (struct sockaddr_in *) &r.arp_netmask;
+ entry->mask = si->sin_addr.s_addr;
+ }
+ else
+ entry->mask = DEF_ARP_NETMASK;
+ entry->dev = rt->rt_dev;
+ sti();
+
+ return 0;
+}
+
+
+/*
+ * Get an ARP cache entry.
+ */
+
+static int arp_req_get(struct arpreq *req)
+{
+ struct arpreq r;
+ struct arp_table *entry;
+ struct sockaddr_in *si;
+
+ /*
+ * We only understand about IP addresses...
+ */
+
+ memcpy_fromfs(&r, req, sizeof(r));
+
+ if (r.arp_pa.sa_family != AF_INET)
+ return -EPFNOSUPPORT;
+
+ /*
+ * Is there an existing entry for this address?
+ */
+
+ si = (struct sockaddr_in *) &r.arp_pa;
+ cli();
+ entry = arp_lookup(si->sin_addr.s_addr,0);
+
+ if (entry == NULL)
+ {
+ sti();
+ return -ENXIO;
+ }
+
+ /*
+ * We found it; copy into structure.
+ */
+
+ memcpy(r.arp_ha.sa_data, &entry->ha, entry->hlen);
+ r.arp_ha.sa_family = entry->htype;
+ r.arp_flags = entry->flags;
+ sti();
+
+ /*
+ * Copy the information back
+ */
+
+ memcpy_tofs(req, &r, sizeof(r));
+ return 0;
+}
+
+
+/*
+ * Handle an ARP layer I/O control request.
+ */
+
+int arp_ioctl(unsigned int cmd, void *arg)
+{
+ struct arpreq r;
+ struct sockaddr_in *si;
+ int err;
+
+ switch(cmd)
+ {
+ case SIOCDARP:
+ if (!suser())
+ return -EPERM;
+ err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
+ if(err)
+ return err;
+ memcpy_fromfs(&r, arg, sizeof(r));
+ if (r.arp_pa.sa_family != AF_INET)
+ return -EPFNOSUPPORT;
+ si = (struct sockaddr_in *) &r.arp_pa;
+ arp_destroy(si->sin_addr.s_addr, 1);
+ return 0;
+ case SIOCGARP:
+ err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
+ if(err)
+ return err;
+ return arp_req_get((struct arpreq *)arg);
+ case SIOCSARP:
+ if (!suser())
+ return -EPERM;
+ err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
+ if(err)
+ return err;
+ return arp_req_set((struct arpreq *)arg);
+ default:
+ return -EINVAL;
+ }
+ /*NOTREACHED*/
+ return 0;
+}
+
+
+/*
+ * Called once on startup.
+ */
+
+static struct packet_type arp_packet_type =
+{
+ 0, /* Should be: __constant_htons(ETH_P_ARP) - but this _doesn't_ come out constant! */
+ 0, /* copy */
+ arp_rcv,
+ NULL,
+ NULL
+};
+
+void arp_init (void)
+{
+ /* Register the packet type */
+ arp_packet_type.type=htons(ETH_P_ARP);
+ dev_add_pack(&arp_packet_type);
+ /* Start with the regular checks for expired arp entries. */
+ add_timer(&arp_timer);
+}
+
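
The lookup strategy used by arp_lookup() above (an exact match in the hashed bucket, followed by a netmask match against the proxy chain) can be restated as a small self-contained C sketch. This is only an illustration under assumptions: the table size, hash function and entry names below are invented, and only the matching logic mirrors the kernel routine.

/*
 * Hedged sketch of the arp_lookup() strategy: hash the protocol address
 * for exact entries, then fall back to a proxy list where an entry
 * matches whenever (entry->ip ^ addr) & entry->mask == 0.
 */
#include <stdio.h>
#include <arpa/inet.h>

#define TABLE_SIZE 16	/* stand-in for FULL_ARP_TABLE_SIZE */

struct entry {
	unsigned long ip;	/* protocol address, network order */
	unsigned long mask;	/* all ones for a normal entry     */
	struct entry *next;
};

static struct entry *table[TABLE_SIZE];	/* hashed exact entries   */
static struct entry *proxy;		/* proxy (netmask) entries */

static unsigned hash(unsigned long ip)	/* stand-in for HASH() */
{
	return (unsigned)(ip ^ (ip >> 8)) % TABLE_SIZE;
}

static struct entry *lookup(unsigned long addr, int exact)
{
	struct entry *e;

	for (e = table[hash(addr)]; e; e = e->next)
		if (e->ip == addr)
			return e;

	/* Possibly a proxy entry: 'exact' forces a full match, otherwise
	   only the bits covered by the entry's netmask must agree. */
	for (e = proxy; e; e = e->next)
		if (exact ? (e->ip == addr) : !((e->ip ^ addr) & e->mask))
			return e;
	return NULL;
}

int main(void)
{
	static struct entry host = { 0, 0xFFFFFFFFUL, NULL };
	static struct entry net  = { 0, 0, NULL };

	host.ip  = htonl(0xC0000201UL);		/* 192.0.2.1, exact entry   */
	net.ip   = htonl(0xC0000200UL);		/* 192.0.2.0/24 proxy entry */
	net.mask = htonl(0xFFFFFF00UL);
	table[hash(host.ip)] = &host;
	proxy = &net;

	printf("exact: %s, proxy: %s\n",
	       lookup(host.ip, 0) ? "hit" : "miss",
	       lookup(htonl(0xC0000242UL), 0) ? "hit" : "miss");
	return 0;
}
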
diff --git a/net/inet/arp.h b/net/inet/arp.h
new file mode 100644
index 000000000..a68adc30a
--- /dev/null
+++ b/net/inet/arp.h
@@ -0,0 +1,18 @@
+/* linux/net/inet/arp.h */
+#ifndef _ARP_H
+#define _ARP_H
+
+extern void arp_init(void);
+extern void arp_destroy(unsigned long paddr, int force);
+extern void arp_device_down(struct device *dev);
+extern int arp_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern int arp_find(unsigned char *haddr, unsigned long paddr,
+ struct device *dev, unsigned long saddr, struct sk_buff *skb);
+extern int arp_get_info(char *buffer, char **start, off_t origin, int length);
+extern int arp_ioctl(unsigned int cmd, void *arg);
+extern void arp_send(int type, int ptype, unsigned long dest_ip,
+ struct device *dev, unsigned long src_ip,
+ unsigned char *dest_hw, unsigned char *src_hw);
+
+#endif /* _ARP_H */
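
The SIOCGARP/SIOCSARP/SIOCDARP paths handled by arp_ioctl() above are driven from user space with a struct arpreq passed on an AF_INET socket. A minimal sketch of the read side follows; it only touches the arpreq fields referenced in the code above (arp_pa, arp_ha, arp_flags), error handling is minimal, the probed address is an example, and later kernels may additionally expect a device name in the request.

/*
 * Query the ARP cache entry for one IP address via SIOCGARP.
 * Expect an ENXIO error (as returned by arp_req_get() above)
 * when no cache entry exists for the address.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if_arp.h>
#include <arpa/inet.h>

int main(void)
{
	struct arpreq req;
	struct sockaddr_in *sin = (struct sockaddr_in *)&req.arp_pa;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&req, 0, sizeof(req));
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet_addr("192.0.2.1");	/* example address */

	if (ioctl(fd, SIOCGARP, &req) < 0)
		perror("SIOCGARP");
	else
		printf("flags 0x%x, hw family %d\n",
		       req.arp_flags, req.arp_ha.sa_family);
	close(fd);
	return 0;
}
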
diff --git a/net/inet/datagram.c b/net/inet/datagram.c
new file mode 100644
index 000000000..116c16e3e
--- /dev/null
+++ b/net/inet/datagram.c
@@ -0,0 +1,209 @@
+/*
+ * SUCS NET3:
+ *
+ * Generic datagram handling routines. These are generic for all protocols. Possibly a generic IP version on top
+ * of these would make sense. Not tonight however 8-).
+ *	This is used because UDP, RAW, PACKET and the to-be-released IPX layer all have identical select code and mostly
+ * identical recvfrom() code. So we share it here. The select was shared before but buried in udp.c so I moved it.
+ *
+ * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>. (datagram_select() from old udp.c code)
+ *
+ * Fixes:
+ * Alan Cox : NULL return from skb_peek_copy() understood
+ * Alan Cox : Rewrote skb_read_datagram to avoid the skb_peek_copy stuff.
+ * Alan Cox : Added support for SOCK_SEQPACKET. IPX can no longer use the SO_TYPE hack but
+ * AX.25 now works right, and SPX is feasible.
+ * Alan Cox : Fixed write select of non IP protocol crash.
+ * Florian La Roche: Changed for my new skbuff handling.
+ *
+ * Note:
+ * A lot of this will change when the protocol/socket separation
+ * occurs. Using this will make things reasonably clean.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include "route.h"
+#include "tcp.h"
+#include "udp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+
+
+/*
+ * Get a datagram skbuff, understands the peeking, nonblocking wakeups and possible
+ *	races. This replaces identical code in packet, raw and udp, as well as the yet to
+ *	be released IPX support. It also finally fixes the long-standing peek and read
+ * race for datagram sockets. If you alter this routine remember it must be
+ * re-entrant.
+ */
+
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err)
+{
+ struct sk_buff *skb;
+
+ /* Socket is inuse - so the timer doesn't attack it */
+restart:
+ sk->inuse = 1;
+ while(skb_peek(&sk->receive_queue) == NULL) /* No data */
+ {
+ /* If we are shutdown then no more data is going to appear. We are done */
+ if (sk->shutdown & RCV_SHUTDOWN)
+ {
+ release_sock(sk);
+ *err=0;
+ return NULL;
+ }
+
+ if(sk->err)
+ {
+ release_sock(sk);
+ *err=-sk->err;
+ sk->err=0;
+ return NULL;
+ }
+
+		/* A sequenced-packet socket may have become disconnected. If so we report the problem */
+ if(sk->type==SOCK_SEQPACKET && sk->state!=TCP_ESTABLISHED)
+ {
+ release_sock(sk);
+ *err=-ENOTCONN;
+ return NULL;
+ }
+
+ /* User doesn't want to wait */
+ if (noblock)
+ {
+ release_sock(sk);
+ *err=-EAGAIN;
+ return NULL;
+ }
+ release_sock(sk);
+
+ /* Interrupts off so that no packet arrives before we begin sleeping.
+ Otherwise we might miss our wake up */
+ cli();
+ if (skb_peek(&sk->receive_queue) == NULL)
+ {
+ interruptible_sleep_on(sk->sleep);
+ /* Signals may need a restart of the syscall */
+ if (current->signal & ~current->blocked)
+ {
+ sti();
+ *err=-ERESTARTSYS;
+ return(NULL);
+ }
+ if(sk->err != 0) /* Error while waiting for packet
+ eg an icmp sent earlier by the
+ peer has finally turned up now */
+ {
+ *err = -sk->err;
+ sti();
+ sk->err=0;
+ return NULL;
+ }
+ }
+ sk->inuse = 1;
+ sti();
+ }
+ /* Again only user level code calls this function, so nothing interrupt level
+ will suddenly eat the receive_queue */
+ if (!(flags & MSG_PEEK))
+ {
+ skb=skb_dequeue(&sk->receive_queue);
+ if(skb!=NULL)
+ skb->users++;
+ else
+ goto restart; /* Avoid race if someone beats us to the data */
+ }
+ else
+ {
+ cli();
+ skb=skb_peek(&sk->receive_queue);
+ if(skb!=NULL)
+ skb->users++;
+ sti();
+ if(skb==NULL) /* shouldn't happen but .. */
+ *err=-EAGAIN;
+ }
+ return skb;
+}
+
+void skb_free_datagram(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ skb->users--;
+ if(skb->users>0)
+ {
+ restore_flags(flags);
+ return;
+ }
+ /* See if it needs destroying */
+	if(!skb->next && !skb->prev)	/* Been dequeued by someone - i.e. it has been read */
+ kfree_skb(skb,FREE_READ);
+ restore_flags(flags);
+}
+
+void skb_copy_datagram(struct sk_buff *skb, int offset, char *to, int size)
+{
+	/* Fragment list support to allow >4K receives will be added in a
+	   later release, not this one */
+ memcpy_tofs(to,skb->h.raw+offset,size);
+}
+
+/*
+ * Datagram select: Again totally generic. Moved from udp.c
+ * Now does seqpacket.
+ */
+
+int datagram_select(struct sock *sk, int sel_type, select_table *wait)
+{
+ select_wait(sk->sleep, wait);
+ switch(sel_type)
+ {
+ case SEL_IN:
+ if (sk->type==SOCK_SEQPACKET && sk->state==TCP_CLOSE)
+ {
+ /* Connection closed: Wake up */
+ return(1);
+ }
+ if (skb_peek(&sk->receive_queue) != NULL || sk->err != 0)
+ { /* This appears to be consistent
+ with other stacks */
+ return(1);
+ }
+ return(0);
+
+ case SEL_OUT:
+ if (sk->prot && sk->prot->wspace(sk) >= MIN_WRITE_SPACE)
+ {
+ return(1);
+ }
+ if (sk->prot==NULL && sk->sndbuf-sk->wmem_alloc >= MIN_WRITE_SPACE)
+ {
+ return(1);
+ }
+ return(0);
+
+ case SEL_EX:
+ if (sk->err)
+ return(1); /* Socket has gone into error state (eg icmp error) */
+ return(0);
+ }
+ return(0);
+}
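
From user space, the readiness rules implemented by datagram_select() above are what a select() call on a UDP (or other datagram) socket observes: the socket is readable once a datagram is queued or a pending error exists, and writable while enough send buffer remains. A minimal sketch follows, assuming an arbitrary port and timeout.

/*
 * Wait for one datagram; the kernel side of this wait is
 * datagram_select(SEL_IN) above.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;
	fd_set rfds;
	struct timeval tv = { 5, 0 };	/* arbitrary 5 second timeout */

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(9999);	/* arbitrary example port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	if (select(fd + 1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(fd, &rfds)) {
		char buf[512];
		int n = recv(fd, buf, sizeof(buf), 0);
		printf("received %d bytes\n", n);
	}
	close(fd);
	return 0;
}
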
diff --git a/net/inet/datalink.h b/net/inet/datalink.h
new file mode 100644
index 000000000..ba345f3b5
--- /dev/null
+++ b/net/inet/datalink.h
@@ -0,0 +1,17 @@
+#ifndef _NET_INET_DATALINK_H_
+#define _NET_INET_DATALINK_H_
+
+struct datalink_proto {
+ unsigned short type_len;
+ unsigned char type[8];
+ unsigned short datalink_type;
+ unsigned short header_length;
+ int (*rcvfunc)(struct sk_buff *, struct device *,
+ struct packet_type *);
+ void (*datalink_header)(struct datalink_proto *, struct sk_buff *,
+ unsigned char *);
+ struct datalink_proto *next;
+};
+
+#endif
+
diff --git a/net/inet/dev.c b/net/inet/dev.c
new file mode 100644
index 000000000..973070388
--- /dev/null
+++ b/net/inet/dev.c
@@ -0,0 +1,1472 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dhinds@allegro.stanford.edu>
+ *
+ * Changes:
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant stunts to
+ * keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ *
+ * Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have
+ * the rest as well commented in the end.
+ */
+
+/*
+ * A lot of these includes will be going walkies very soon
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "ip.h"
+#include "route.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "arp.h"
+
+
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ */
+
+struct packet_type *ptype_base = NULL;
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the bottom half handler.
+ */
+
+static struct sk_buff_head backlog =
+{
+ (struct sk_buff *)&backlog, (struct sk_buff *)&backlog
+#ifdef CONFIG_SKB_CHECK
+ ,SK_HEAD_SKB
+#endif
+};
+
+/*
+ * We don't overdo the queue or we will thrash memory badly.
+ */
+
+static int backlog_size = 0;
+
+/*
+ * The number of sockets open for 'all' protocol use. We have to
+ * know this to copy a buffer the correct number of times.
+ */
+
+static int dev_nit=0;
+
+/*
+ * Return the lesser of the two values.
+ */
+
+static __inline__ unsigned long min(unsigned long a, unsigned long b)
+{
+ return (a < b)? a : b;
+}
+
+
+/******************************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************************/
+
+
+/*
+ * Add a protocol ID to the list.
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ struct packet_type *p1;
+ pt->next = ptype_base;
+
+ /*
+ * Don't use copy counts on ETH_P_ALL. Instead keep a global
+	 *	count of how many there are and use that, together with pt->copy,
+	 *	to decide when to copy
+ */
+
+ pt->copy=0; /* Assume we will not be copying the buffer before
+ * this routine gets it
+ */
+
+ if(pt->type == htons(ETH_P_ALL))
+ dev_nit++; /* I'd like a /dev/nit too one day 8) */
+ else
+ {
+ /*
+ * See if we need to copy it - that is another process also
+ * wishes to receive this type of packet.
+ */
+ for (p1 = ptype_base; p1 != NULL; p1 = p1->next)
+ {
+ if (p1->type == pt->type)
+ {
+ pt->copy = 1; /* We will need to copy */
+ break;
+ }
+ }
+ }
+
+ /*
+ * NIT taps must go at the end or net_bh will leak!
+ */
+
+ if (pt->type == htons(ETH_P_ALL))
+ {
+ pt->next=NULL;
+ if(ptype_base==NULL)
+ ptype_base=pt;
+ else
+ {
+ /*
+ * Move to the end of the list
+ */
+ for(p1=ptype_base;p1->next!=NULL;p1=p1->next);
+ /*
+ * Hook on the end
+ */
+ p1->next=pt;
+ }
+ }
+ else
+/*
+ * It goes on the start
+ */
+ ptype_base = pt;
+}
+
+
+/*
+ * Remove a protocol ID from the list.
+ */
+
+void dev_remove_pack(struct packet_type *pt)
+{
+ struct packet_type *lpt, *pt1;
+
+ /*
+ * Keep the count of nit (Network Interface Tap) sockets correct.
+ */
+
+ if (pt->type == htons(ETH_P_ALL))
+ dev_nit--;
+
+ /*
+ * If we are first, just unhook us.
+ */
+
+ if (pt == ptype_base)
+ {
+ ptype_base = pt->next;
+ return;
+ }
+
+ lpt = NULL;
+
+ /*
+	 *	This is harder. What we do is to walk the list of packet
+	 *	handlers for this type. We unhook the entry, and if there is a
+	 *	previous entry that is copying _and_ we are not copying (i.e. we
+	 *	are the last entry for this type) then the previous one is set
+	 *	to non-copying as it is now the last.
+ */
+ for (pt1 = ptype_base; pt1->next != NULL; pt1 = pt1->next)
+ {
+ if (pt1->next == pt )
+ {
+ cli();
+ if (!pt->copy && lpt)
+ lpt->copy = 0;
+ pt1->next = pt->next;
+ sti();
+ return;
+ }
+ if (pt1->next->type == pt->type && pt->type != htons(ETH_P_ALL))
+ lpt = pt1->next;
+ }
+}
+
+/*****************************************************************************************
+
+ Device Interface Subroutines
+
+******************************************************************************************/
+
+/*
+ * Find an interface by name.
+ */
+
+struct device *dev_get(char *name)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (strcmp(dev->name, name) == 0)
+ return(dev);
+ }
+ return(NULL);
+}
+
+
+/*
+ * Prepare an interface for use.
+ */
+
+int dev_open(struct device *dev)
+{
+ int ret = 0;
+
+ /*
+ * Call device private open method
+ */
+ if (dev->open)
+ ret = dev->open(dev);
+
+ /*
+ * If it went open OK then set the flags
+ */
+
+ if (ret == 0)
+ dev->flags |= (IFF_UP | IFF_RUNNING);
+
+ return(ret);
+}
+
+
+/*
+ * Completely shutdown an interface.
+ *
+ * WARNING: Both because of the way the upper layers work (that can be fixed)
+ * and because of races during a close (that can't be fixed any other way)
+ * a device may be given things to transmit EVEN WHEN IT IS DOWN. The driver
+ * MUST cope with this (eg by freeing and dumping the frame).
+ */
+
+int dev_close(struct device *dev)
+{
+ /*
+ * Only close a device if it is up.
+ */
+
+ if (dev->flags != 0)
+ {
+ int ct=0;
+ dev->flags = 0;
+ /*
+ * Call the device specific close. This cannot fail.
+ */
+ if (dev->stop)
+ dev->stop(dev);
+ /*
+ * Delete the route to the device.
+ */
+#ifdef CONFIG_INET
+ ip_rt_flush(dev);
+ arp_device_down(dev);
+#endif
+#ifdef CONFIG_IPX
+ ipxrtr_device_down(dev);
+#endif
+ /*
+ * Blank the IP addresses
+ */
+ dev->pa_addr = 0;
+ dev->pa_dstaddr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ /*
+ * Purge any queued packets when we down the link
+ */
+ while(ct<DEV_NUMBUFFS)
+ {
+ struct sk_buff *skb;
+ while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
+ if(skb->free)
+ kfree_skb(skb,FREE_WRITE);
+ ct++;
+ }
+ }
+ return(0);
+}
+
+
+/*
+ * Send (or queue for sending) a packet.
+ *
+ *	IMPORTANT: When this is called to resend frames, the caller MUST
+ *	already have locked the sk_buff. Apart from that we do the
+ * rest of the magic.
+ */
+
+void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ unsigned long flags;
+ int nitcount;
+ struct packet_type *ptype;
+ int where = 0; /* used to say if the packet should go */
+ /* at the front or the back of the */
+ /* queue - front is a retransmit try */
+
+ if (dev == NULL)
+ {
+ printk("dev.c: dev_queue_xmit: dev = NULL\n");
+ return;
+ }
+
+ if(pri>=0 && !skb_device_locked(skb))
+ skb_device_lock(skb); /* Shove a lock on the frame */
+#ifdef CONFIG_SLAVE_BALANCING
+ save_flags(flags);
+ cli();
+ if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
+ (dev->slave->flags & IFF_UP))
+ dev=dev->slave;
+ restore_flags(flags);
+#endif
+
+ IS_SKB(skb);
+
+ skb->dev = dev;
+
+ /*
+ * This just eliminates some race conditions, but not all...
+ */
+
+ if (skb->next != NULL)
+ {
+ /*
+ * Make sure we haven't missed an interrupt.
+ */
+ printk("dev_queue_xmit: worked around a missed interrupt\n");
+ dev->hard_start_xmit(NULL, dev);
+ return;
+ }
+
+ /*
+ * Negative priority is used to flag a frame that is being pulled from the
+ * queue front as a retransmit attempt. It therefore goes back on the queue
+ * start on a failure.
+ */
+
+ if (pri < 0)
+ {
+ pri = -pri-1;
+ where = 1;
+ }
+
+ if (pri >= DEV_NUMBUFFS)
+ {
+ printk("bad priority in dev_queue_xmit.\n");
+ pri = 1;
+ }
+
+ /*
+	 *	If the address has not been resolved, call the device header rebuilder.
+ * This can cover all protocols and technically not just ARP either.
+ */
+
+ if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
+ return;
+ }
+
+ save_flags(flags);
+ cli();
+ if (!where) {
+#ifdef CONFIG_SLAVE_BALANCING
+ skb->in_dev_queue=1;
+#endif
+ skb_queue_tail(dev->buffs + pri,skb);
+ skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
+ skb = skb_dequeue(dev->buffs + pri);
+ skb_device_lock(skb); /* New buffer needs locking down */
+#ifdef CONFIG_SLAVE_BALANCING
+ skb->in_dev_queue=0;
+#endif
+ }
+ restore_flags(flags);
+
+ /* copy outgoing packets to any sniffer packet handlers */
+ if(!where)
+ {
+ for (nitcount = dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == htons(ETH_P_ALL)) {
+ struct sk_buff *skb2;
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+ break;
+ ptype->func(skb2, skb->dev, ptype);
+ nitcount--;
+ }
+ }
+ }
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ /*
+ * Packet is now solely the responsibility of the driver
+ */
+ return;
+ }
+
+ /*
+	 *	Transmission failed, put skb back into a list. Once on the list it is safe and
+ * no longer device locked (it can be freed safely from the device queue)
+ */
+ cli();
+#ifdef CONFIG_SLAVE_BALANCING
+ skb->in_dev_queue=1;
+ dev->pkt_queue++;
+#endif
+ skb_device_unlock(skb);
+ skb_queue_head(dev->buffs + pri,skb);
+ restore_flags(flags);
+}
+
+/*
+ * Receive a packet from a device driver and queue it for the upper
+ * (protocol) levels. It always succeeds. This is the recommended
+ * interface to use.
+ */
+
+void netif_rx(struct sk_buff *skb)
+{
+ static int dropping = 0;
+
+ /*
+ * Any received buffers are un-owned and should be discarded
+ * when freed. These will be updated later as the frames get
+ * owners.
+ */
+ skb->sk = NULL;
+ skb->free = 1;
+ if(skb->stamp.tv_sec==0)
+ skb->stamp = xtime;
+
+ /*
+ * Check that we aren't overdoing things.
+ */
+
+ if (!backlog_size)
+ dropping = 0;
+ else if (backlog_size > 100)
+ dropping = 1;
+
+ if (dropping)
+ {
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Add it to the "backlog" queue.
+ */
+
+ IS_SKB(skb);
+ skb_queue_tail(&backlog,skb);
+ backlog_size++;
+
+ /*
+ * If any packet arrived, mark it for processing after the
+ * hardware interrupt returns.
+ */
+
+ mark_bh(NET_BH);
+ return;
+}
+
+
+/*
+ * The old interface to fetch a packet from a device driver.
+ * This function is the base level entry point for all drivers that
+ * want to send a packet to the upper (protocol) levels. It takes
+ * care of de-multiplexing the packet to the various modules based
+ * on their protocol ID.
+ *
+ * Return values: 1 <- exit I can't do any more
+ * 0 <- feed me more (i.e. "done", "OK").
+ *
+ * This function is OBSOLETE and should not be used by any new
+ * device.
+ */
+
+int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
+{
+ static int dropping = 0;
+ struct sk_buff *skb = NULL;
+ unsigned char *to;
+ int amount, left;
+ int len2;
+
+ if (dev == NULL || buff == NULL || len <= 0)
+ return(1);
+
+ if (flags & IN_SKBUFF)
+ {
+ skb = (struct sk_buff *) buff;
+ }
+ else
+ {
+ if (dropping)
+ {
+ if (skb_peek(&backlog) != NULL)
+ return(1);
+ printk("INET: dev_rint: no longer dropping packets.\n");
+ dropping = 0;
+ }
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (skb == NULL)
+ {
+ printk("dev_rint: packet dropped on %s (no memory) !\n",
+ dev->name);
+ dropping = 1;
+ return(1);
+ }
+
+ /*
+ * First we copy the packet into a buffer, and save it for later. We
+ * in effect handle the incoming data as if it were from a circular buffer
+ */
+
+ to = skb->data;
+ left = len;
+
+ len2 = len;
+ while (len2 > 0)
+ {
+ amount = min(len2, (unsigned long) dev->rmem_end -
+ (unsigned long) buff);
+ memcpy(to, buff, amount);
+ len2 -= amount;
+ left -= amount;
+ buff += amount;
+ to += amount;
+ if ((unsigned long) buff == dev->rmem_end)
+ buff = (unsigned char *) dev->rmem_start;
+ }
+ }
+
+ /*
+ * Tag the frame and kick it to the proper receive routine
+ */
+
+ skb->len = len;
+ skb->dev = dev;
+ skb->free = 1;
+
+ netif_rx(skb);
+ /*
+ * OK, all done.
+ */
+ return(0);
+}
+
+
+/*
+ * This routine causes all interfaces to try to send some data.
+ */
+
+void dev_transmit(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags != 0 && !dev->tbusy) {
+ /*
+ * Kick the device
+ */
+ dev_tint(dev);
+ }
+ }
+}
+
+
+/**********************************************************************************
+
+ Receive Queue Processor
+
+***********************************************************************************/
+
+/*
+ * This is a single non-reentrant routine which takes the received packet
+ * queue and throws it at the networking layers in the hope that something
+ * useful will emerge.
+ */
+
+volatile char in_bh = 0; /* Non-reentrant remember */
+
+int in_net_bh() /* Used by timer.c */
+{
+ return(in_bh==0?0:1);
+}
+
+/*
+ * When we are called the queue is ready to grab, the interrupts are
+ *	on and hardware can interrupt and queue to the receive queue as we
+ * run with no problems.
+ * This is run as a bottom half after an interrupt handler that does
+ * mark_bh(NET_BH);
+ */
+
+void net_bh(void *tmp)
+{
+ struct sk_buff *skb;
+ struct packet_type *ptype;
+ unsigned short type;
+ unsigned char flag = 0;
+ int nitcount;
+
+ /*
+ * Atomically check and mark our BUSY state.
+ */
+
+ if (set_bit(1, (void*)&in_bh))
+ return;
+
+ /*
+ * Can we send anything now? We want to clear the
+ * decks for any more sends that get done as we
+ * process the input.
+ */
+
+ dev_transmit();
+
+ /*
+	 *	There may be data left to process: this can occur because a
+	 *	mark_bh() is done after we empty the queue, including one
+	 *	from a device which does a mark_bh() just after queueing
+ */
+
+ cli();
+
+ /*
+ * While the queue is not empty
+ */
+
+ while((skb=skb_dequeue(&backlog))!=NULL)
+ {
+ /*
+ * We have a packet. Therefore the queue has shrunk
+ */
+ backlog_size--;
+
+ nitcount=dev_nit;
+ flag=0;
+ sti();
+
+ /*
+ * Bump the pointer to the next structure.
+ * This assumes that the basic 'skb' pointer points to
+ * the MAC header, if any (as indicated by its "length"
+ * field). Take care now!
+ */
+
+ skb->h.raw = skb->data + skb->dev->hard_header_len;
+ skb->len -= skb->dev->hard_header_len;
+
+ /*
+ * Fetch the packet protocol ID. This is also quite ugly, as
+ * it depends on the protocol driver (the interface itself) to
+ * know what the type is, or where to get it from. The Ethernet
+ * interfaces fetch the ID from the two bytes in the Ethernet MAC
+ * header (the h_proto field in struct ethhdr), but other drivers
+ * may either use the ethernet ID's or extra ones that do not
+ * clash (eg ETH_P_AX25). We could set this before we queue the
+ * frame. In fact I may change this when I have time.
+ */
+
+ type = skb->dev->type_trans(skb, skb->dev);
+
+ /*
+ * We got a packet ID. Now loop over the "known protocols"
+ * table (which is actually a linked list, but this will
+ * change soon if I get my way- FvK), and forward the packet
+ * to anyone who wants it.
+ *
+ * [FvK didn't get his way but he is right this ought to be
+ * hashed so we typically get a single hit. The speed cost
+ * here is minimal but no doubt adds up at the 4,000+ pkts/second
+ * rate we can hit flat out]
+ */
+
+ for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == type || ptype->type == htons(ETH_P_ALL))
+ {
+ struct sk_buff *skb2;
+
+ if (ptype->type == htons(ETH_P_ALL))
+ nitcount--;
+ if (ptype->copy || nitcount)
+ {
+ /*
+ * copy if we need to
+ */
+#ifdef OLD
+ skb2 = alloc_skb(skb->len, GFP_ATOMIC);
+ if (skb2 == NULL)
+ continue;
+ memcpy(skb2, skb, skb2->mem_len);
+ skb2->mem_addr = skb2;
+ skb2->h.raw = (unsigned char *)(
+ (unsigned long) skb2 +
+ (unsigned long) skb->h.raw -
+ (unsigned long) skb
+ );
+ skb2->free = 1;
+#else
+ skb2=skb_clone(skb, GFP_ATOMIC);
+ if(skb2==NULL)
+ continue;
+#endif
+ }
+ else
+ {
+ skb2 = skb;
+ }
+
+ /*
+ * Protocol located.
+ */
+
+ flag = 1;
+
+ /*
+ * Kick the protocol handler. This should be fast
+ * and efficient code.
+ */
+
+ ptype->func(skb2, skb->dev, ptype);
+ }
+ } /* End of protocol list loop */
+
+ /*
+		 *	Has an unknown packet been received ?
+ */
+
+ if (!flag)
+ {
+ kfree_skb(skb, FREE_WRITE);
+ }
+
+ /*
+ * Again, see if we can transmit anything now.
+ */
+
+ dev_transmit();
+ cli();
+ } /* End of queue loop */
+
+ /*
+ * We have emptied the queue
+ */
+
+ in_bh = 0;
+ sti();
+
+ /*
+ * One last output flush.
+ */
+
+ dev_transmit();
+}
+
+
+/*
+ *	This routine is called when a device driver (i.e. an
+ * interface) is ready to transmit a packet.
+ */
+
+void dev_tint(struct device *dev)
+{
+ int i;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ save_flags(flags);
+ /*
+ * Work the queues in priority order
+ */
+
+ for(i = 0;i < DEV_NUMBUFFS; i++)
+ {
+ /*
+ * Pull packets from the queue
+ */
+
+
+ cli();
+ while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
+ {
+ /*
+ * Stop anyone freeing the buffer while we retransmit it
+ */
+ skb_device_lock(skb);
+ restore_flags(flags);
+ /*
+			 *	Feed them to the output stage, and if that fails
+			 *	indicate that they should be re-queued at the front.
+ */
+ dev_queue_xmit(skb,dev,-i - 1);
+ /*
+ * If we can take no more then stop here.
+ */
+ if (dev->tbusy)
+ return;
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size shortly, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char *arg)
+{
+ struct ifconf ifc;
+ struct ifreq ifr;
+ struct device *dev;
+ char *pos;
+ int len;
+ int err;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
+ if(err)
+ return err;
+ memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
+ len = ifc.ifc_len;
+ pos = ifc.ifc_buf;
+
+ /*
+ * We now walk the device list filling each active device
+ * into the array.
+ */
+
+ err=verify_area(VERIFY_WRITE,pos,len);
+ if(err)
+ return err;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
+ continue;
+ memset(&ifr, 0, sizeof(struct ifreq));
+ strcpy(ifr.ifr_name, dev->name);
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+
+ /*
+ * Write this block to the caller's space.
+ */
+
+ memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
+ pos += sizeof(struct ifreq);
+ len -= sizeof(struct ifreq);
+
+ /*
+ * Have we run out of space here ?
+ */
+
+ if (len < sizeof(struct ifreq))
+ break;
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+
+ ifc.ifc_len = (pos - ifc.ifc_buf);
+ ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
+ memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
+
+ /*
+ * Report how much was filled in
+ */
+
+ return(pos - arg);
+}
+
+
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+
+static int sprintf_stats(char *buffer, struct device *dev)
+{
+ struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
+ int size;
+
+ if (stats)
+ size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
+ dev->name,
+ stats->rx_packets, stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors
+ + stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors + stats->tx_aborted_errors
+ + stats->tx_window_errors + stats->tx_heartbeat_errors);
+ else
+ size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ return size;
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
+ * to create /proc/net/dev
+ */
+
+int dev_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+
+ struct device *dev;
+
+
+ size = sprintf(buffer, "Inter-| Receive | Transmit\n"
+ " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+
+
+/*
+ *	This checks a netmask passed in by the device ioctl calls: the address
+ *	must not have bits set outside the mask, and the mask must be contiguous.
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
+
+/*
+ * Perform the SIOCxIFxxx calls.
+ *
+ * The socket layer has seen an ioctl the address family thinks is
+ * for the device. At this point we get invoked to make a decision
+ */
+
+static int dev_ifsioc(void *arg, unsigned int getset)
+{
+ struct ifreq ifr;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+
+ int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
+ if(err)
+ return err;
+
+ memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ if ((dev = dev_get(ifr.ifr_name)) == NULL)
+ return(-ENODEV);
+
+ switch(getset)
+ {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr.ifr_flags = dev->flags;
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ ret = 0;
+ break;
+ case SIOCSIFFLAGS: /* Set interface flags */
+ {
+ int old_flags = dev->flags;
+#ifdef CONFIG_SLAVE_BALANCING
+ if(dev->flags&IFF_SLAVE)
+ return -EBUSY;
+#endif
+ dev->flags = ifr.ifr_flags & (
+ IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER);
+#ifdef CONFIG_SLAVE_BALANCING
+ if(!(dev->flags&IFF_MASTER) && dev->slave)
+ {
+ dev->slave->flags&=~IFF_SLAVE;
+ dev->slave=NULL;
+ }
+#endif
+
+ if( dev->set_multicast_list!=NULL)
+ {
+
+ /*
+ * Has promiscuous mode been turned off
+ */
+
+ if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
+ dev->set_multicast_list(dev,0,NULL);
+
+ /*
+ * Has it been turned on
+ */
+
+ if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
+ dev->set_multicast_list(dev,-1,NULL);
+ }
+
+ /*
+ * Have we downed the interface
+ */
+
+ if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
+ {
+ ret = dev_close(dev);
+ }
+ else
+ {
+ /*
+ * Have we upped the interface
+ */
+
+ ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
+ ? dev_open(dev) : 0;
+ /*
+ * Check the flags.
+ */
+ if(ret<0)
+ dev->flags&=~IFF_UP; /* Didn't open so down the if */
+ }
+ }
+ break;
+
+ case SIOCGIFADDR: /* Get interface address (and family) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_port = 0;
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ ret = 0;
+ break;
+
+ case SIOCSIFADDR: /* Set interface address (and family) */
+ dev->pa_addr = (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr;
+ dev->family = ifr.ifr_addr.sa_family;
+
+#ifdef CONFIG_INET
+			/* This is naughty. When net-032e comes out it wants moving into the net032
+			   code, not the kernel. Till then it can sit here (SIGH) */
+ dev->pa_mask = ip_get_mask(dev->pa_addr);
+#endif
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ ret = 0;
+ break;
+
+ case SIOCGIFBRDADDR: /* Get the broadcast address */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ ret = 0;
+ break;
+
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+ dev->pa_brdaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ ret = 0;
+ break;
+
+ case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
+ dev->pa_dstaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_port = 0;
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ ret = 0;
+ break;
+
+ case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ {
+ unsigned long mask = (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr;
+ ret = -EINVAL;
+ /*
+ * The mask we set must be legal.
+ */
+ if (bad_mask(mask,0))
+ break;
+ dev->pa_mask = mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
+
+ ifr.ifr_metric = dev->metric;
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ ret = 0;
+ break;
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
+ dev->metric = ifr.ifr_metric;
+ ret = 0;
+ break;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr.ifr_mtu = dev->mtu;
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ ret = 0;
+ break;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+
+ /*
+		 *	MTU must be positive and kept below the page size limit
+ */
+
+ if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
+ return -EINVAL;
+ dev->mtu = ifr.ifr_mtu;
+ ret = 0;
+ break;
+
+ case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
+ do not support it */
+ printk("NET: ioctl(SIOCGIFMEM, 0x%08X)\n", (int)arg);
+ ret = -EINVAL;
+ break;
+
+ case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
+ printk("NET: ioctl(SIOCSIFMEM, 0x%08X)\n", (int)arg);
+ ret = -EINVAL;
+ break;
+
+ case OLD_SIOCGIFHWADDR: /* Get the hardware address. This will change and SIFHWADDR will be added */
+ memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ ret=0;
+ break;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ ret=0;
+ break;
+
+ case SIOCSIFHWADDR:
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(ifr.ifr_hwaddr.sa_family!=dev->type)
+ return -EINVAL;
+ ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
+ break;
+
+ case SIOCGIFMAP:
+ ifr.ifr_map.mem_start=dev->mem_start;
+ ifr.ifr_map.mem_end=dev->mem_end;
+ ifr.ifr_map.base_addr=dev->base_addr;
+ ifr.ifr_map.irq=dev->irq;
+ ifr.ifr_map.dma=dev->dma;
+ ifr.ifr_map.port=dev->if_port;
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ ret=0;
+ break;
+
+ case SIOCSIFMAP:
+ if(dev->set_config==NULL)
+ return -EOPNOTSUPP;
+ return dev->set_config(dev,&ifr.ifr_map);
+
+ case SIOCGIFSLAVE:
+#ifdef CONFIG_SLAVE_BALANCING
+ if(dev->slave==NULL)
+ return -ENOENT;
+ strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ ret=0;
+#else
+ return -ENOENT;
+#endif
+ break;
+#ifdef CONFIG_SLAVE_BALANCING
+ case SIOCSIFSLAVE:
+ {
+
+ /*
+ * Fun game. Get the device up and the flags right without
+ * letting some scummy user confuse us.
+ */
+ unsigned long flags;
+ struct device *slave=dev_get(ifr.ifr_slave);
+ save_flags(flags);
+ if(slave==NULL)
+ {
+ return -ENODEV;
+ }
+ cli();
+ if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
+ {
+ restore_flags(flags);
+ return -EINVAL;
+ }
+ if(dev->flags&IFF_SLAVE)
+ {
+ restore_flags(flags);
+ return -EBUSY;
+ }
+ if(dev->slave!=NULL)
+ {
+ restore_flags(flags);
+ return -EBUSY;
+ }
+ if(slave->flags&IFF_SLAVE)
+ {
+ restore_flags(flags);
+ return -EBUSY;
+ }
+ dev->slave=slave;
+ slave->flags|=IFF_SLAVE;
+ dev->flags|=IFF_MASTER;
+ restore_flags(flags);
+ ret=0;
+ }
+ break;
+#endif
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if((getset >= SIOCDEVPRIVATE) &&
+ (getset <= (SIOCDEVPRIVATE + 15))) {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ break;
+ }
+
+ ret = -EINVAL;
+ }
+ return(ret);
+}
+
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+int dev_ioctl(unsigned int cmd, void *arg)
+{
+ switch(cmd)
+ {
+ case SIOCGIFCONF:
+ (void) dev_ifconf((char *) arg);
+ return 0;
+
+ /*
+ * Ioctl calls that can be done by all.
+ */
+
+ case SIOCGIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case OLD_SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ return dev_ifsioc(arg, cmd);
+
+ /*
+ * Ioctl calls requiring the power of a superuser
+ */
+
+ case SIOCSIFFLAGS:
+ case SIOCSIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMEM:
+ case SIOCSIFMAP:
+ case SIOCSIFSLAVE:
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+
+ default:
+ if((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15))) {
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+ }
+ return -EINVAL;
+ }
+}
+
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ * The PCMCIA code may need to change this a little, and add a pair
+ *	of register_inet_device()/unregister_inet_device() calls. This will be
+ *	needed to support ethernet drivers as modules.
+ */
+
+void dev_init(void)
+{
+ struct device *dev, *dev2;
+
+ /*
+ * Add the devices.
+ * If the call to dev->init fails, the dev is removed
+ * from the chain disconnecting the device until the
+ * next reboot.
+ */
+
+ dev2 = NULL;
+ for (dev = dev_base; dev != NULL; dev=dev->next)
+ {
+ if (dev->init && dev->init(dev))
+ {
+ /*
+ * It failed to come up. Unhook it.
+ */
+
+ if (dev2 == NULL)
+ dev_base = dev->next;
+ else
+ dev2->next = dev->next;
+ }
+ else
+ {
+ dev2 = dev;
+ }
+ }
+}
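
The SIOCGIFCONF service provided by dev_ifconf() above is used from user space by handing the kernel a buffer of struct ifreq slots; the kernel fills one slot per interface that is up and rewrites ifc_len to the number of bytes actually used. A hedged sketch follows; the buffer size is arbitrary and error handling is minimal.

/*
 * List the configured interfaces and their primary addresses
 * using the SIOCGIFCONF path implemented above.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>
#include <arpa/inet.h>

int main(void)
{
	struct ifreq reqs[16];		/* arbitrary upper bound */
	struct ifconf ifc;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, n;

	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
		perror("SIOCGIFCONF");
		return 1;
	}

	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++) {
		struct sockaddr_in *sin =
			(struct sockaddr_in *)&reqs[i].ifr_addr;
		printf("%-8s %s\n", reqs[i].ifr_name, inet_ntoa(sin->sin_addr));
	}
	close(fd);
	return 0;
}
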
diff --git a/net/inet/devinet.c b/net/inet/devinet.c
new file mode 100644
index 000000000..a0ed0b5c3
--- /dev/null
+++ b/net/inet/devinet.c
@@ -0,0 +1,214 @@
+/*
+ * NET3 IP device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "ip.h"
+#include "route.h"
+#include "protocol.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "arp.h"
+
+/*
+ * Determine a default network mask, based on the IP address.
+ */
+
+unsigned long ip_get_mask(unsigned long addr)
+{
+ unsigned long dst;
+
+ if (addr == 0L)
+ return(0L); /* special case */
+
+ dst = ntohl(addr);
+ if (IN_CLASSA(dst))
+ return(htonl(IN_CLASSA_NET));
+ if (IN_CLASSB(dst))
+ return(htonl(IN_CLASSB_NET));
+ if (IN_CLASSC(dst))
+ return(htonl(IN_CLASSC_NET));
+
+ /*
+ * Something else, probably a multicast.
+ */
+
+ return(0);
+}
+
+/*
+ * Check the address for our address, broadcasts, etc.
+ *
+ * I intend to fix this to at the very least cache the last
+ * resolved entry.
+ */
+
+int ip_chk_addr(unsigned long addr)
+{
+ struct device *dev;
+ unsigned long mask;
+
+ /*
+ * Accept both `all ones' and `all zeros' as BROADCAST.
+ * (Support old BSD in other words). This old BSD
+ * support will go very soon as it messes other things
+ * up.
+ * Also accept `loopback broadcast' as BROADCAST.
+ */
+
+ if (addr == INADDR_ANY || addr == INADDR_BROADCAST ||
+ addr == htonl(0x7FFFFFFFL))
+ return IS_BROADCAST;
+
+ mask = ip_get_mask(addr);
+
+ /*
+ * Accept all of the `loopback' class A net.
+ */
+
+ if ((addr & mask) == htonl(0x7F000000L))
+ return IS_MYADDR;
+
+ /*
+ * OK, now check the interface addresses.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (!(dev->flags & IFF_UP))
+ continue;
+ /*
+ * If the protocol address of the device is 0 this is special
+ * and means we are address hunting (eg bootp).
+ */
+
+ if ((dev->pa_addr == 0)/* || (dev->flags&IFF_PROMISC)*/)
+ return IS_MYADDR;
+ /*
+ * Is it the exact IP address?
+ */
+
+ if (addr == dev->pa_addr)
+ return IS_MYADDR;
+ /*
+ * Is it our broadcast address?
+ */
+
+ if ((dev->flags & IFF_BROADCAST) && addr == dev->pa_brdaddr)
+ return IS_BROADCAST;
+ /*
+ * Nope. Check for a subnetwork broadcast.
+ */
+
+ if (((addr ^ dev->pa_addr) & dev->pa_mask) == 0)
+ {
+ if ((addr & ~dev->pa_mask) == 0)
+ return IS_BROADCAST;
+ if ((addr & ~dev->pa_mask) == ~dev->pa_mask)
+ return IS_BROADCAST;
+ }
+
+ /*
+ * Nope. Check for Network broadcast.
+ */
+
+ if (((addr ^ dev->pa_addr) & mask) == 0)
+ {
+ if ((addr & ~mask) == 0)
+ return IS_BROADCAST;
+ if ((addr & ~mask) == ~mask)
+ return IS_BROADCAST;
+ }
+ }
+ if(IN_MULTICAST(ntohl(addr)))
+ return IS_MULTICAST;
+ return 0; /* no match at all */
+}
+
+
+/*
+ * Retrieve our own address.
+ *
+ * Because the loopback address (127.0.0.1) is already recognized
+ *	automatically, we can use the loopback interface's address as
+ *	our "primary" address. This is the address used by IP et
+ * al when it doesn't know which address to use (i.e. it does not
+ * yet know from or to which interface to go...).
+ */
+
+unsigned long ip_my_addr(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags & IFF_LOOPBACK)
+ return(dev->pa_addr);
+ }
+ return(0);
+}
+
+/*
+ *	Find the interface that can handle a given destination address.
+ *
+ *	This needs optimising, since it is relatively trivial to collapse
+ *	the two loops into one.
+ */
+
+struct device * ip_dev_check(unsigned long addr)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev; dev = dev->next)
+ {
+ if (!(dev->flags & IFF_UP))
+ continue;
+ if (!(dev->flags & IFF_POINTOPOINT))
+ continue;
+ if (addr != dev->pa_dstaddr)
+ continue;
+ return dev;
+ }
+ for (dev = dev_base; dev; dev = dev->next)
+ {
+ if (!(dev->flags & IFF_UP))
+ continue;
+ if (dev->flags & IFF_POINTOPOINT)
+ continue;
+ if (dev->pa_mask & (addr ^ dev->pa_addr))
+ continue;
+ return dev;
+ }
+ return NULL;
+}
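
The classful default-netmask rule applied by ip_get_mask() above can be exercised on its own: class A addresses get 255.0.0.0, class B 255.255.0.0, class C 255.255.255.0, and anything else (multicast and reserved space) gets no mask. A small userspace restatement follows, with an example address chosen arbitrarily.

/*
 * Classful default netmask, mirroring ip_get_mask() above.
 * IN_CLASSA()/IN_CLASSB()/IN_CLASSC() come from <netinet/in.h>.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static unsigned long default_mask(unsigned long addr)	/* network order */
{
	unsigned long dst;

	if (addr == 0)
		return 0;			/* special case   */
	dst = ntohl(addr);
	if (IN_CLASSA(dst))
		return htonl(IN_CLASSA_NET);	/* 255.0.0.0      */
	if (IN_CLASSB(dst))
		return htonl(IN_CLASSB_NET);	/* 255.255.0.0    */
	if (IN_CLASSC(dst))
		return htonl(IN_CLASSC_NET);	/* 255.255.255.0  */
	return 0;				/* class D/E      */
}

int main(void)
{
	struct in_addr a, m;

	inet_aton("130.246.1.2", &a);		/* example class B address */
	m.s_addr = default_mask(a.s_addr);
	printf("default mask: %s\n", inet_ntoa(m));
	return 0;
}
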
diff --git a/net/inet/eth.c b/net/inet/eth.c
new file mode 100644
index 000000000..550e0f8f0
--- /dev/null
+++ b/net/inet/eth.c
@@ -0,0 +1,196 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Ethernet-type device handling.
+ *
+ * Version: @(#)eth.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Mr Linux : Arp problems
+ * Alan Cox : Generic queue tidyup (very tiny here)
+ * Alan Cox : eth_header ntohs should be htons
+ * Alan Cox : eth_rebuild_header missing an htons and
+ * minor other things.
+ * Tegge : Arp bug fixes.
+ * Florian : Removed many unnecessary functions, code cleanup
+ * and changes for new arp and skbuff.
+ * Alan Cox : Redid header building to reflect new format.
+ * Alan Cox : ARP only when compiled with CONFIG_INET
+ * Greg Page : 802.2 and SNAP stuff
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include "arp.h"
+
+void eth_setup(char *str, int *ints)
+{
+ struct device *d = dev_base;
+
+ if (!str || !*str)
+ return;
+ while (d)
+ {
+ if (!strcmp(str,d->name))
+ {
+ if (ints[0] > 0)
+ d->irq=ints[1];
+ if (ints[0] > 1)
+ d->base_addr=ints[2];
+ if (ints[0] > 2)
+ d->mem_start=ints[3];
+ if (ints[0] > 3)
+ d->mem_end=ints[4];
+ break;
+ }
+ d=d->next;
+ }
+}
+
+
+/*
+ * Create the Ethernet MAC header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp)
+ */
+
+int eth_header(unsigned char *buff, struct device *dev, unsigned short type,
+ void *daddr, void *saddr, unsigned len,
+ struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)buff;
+
+ /*
+ * Set the protocol type. For a packet of type ETH_P_802_3 we put the length
+ * in here instead. It is up to the 802.2 layer to carry protocol information.
+ */
+
+ if(type!=ETH_P_802_3)
+ eth->h_proto = htons(type);
+ else
+ eth->h_proto = htons(len);
+
+ /*
+ * Set the source hardware address.
+ */
+
+ if(saddr)
+ memcpy(eth->h_source,saddr,dev->addr_len);
+ else
+ memcpy(eth->h_source,dev->dev_addr,dev->addr_len);
+
+ /*
+ * Anyway, the loopback-device should never use this function...
+ */
+
+ if (dev->flags & IFF_LOOPBACK)
+ {
+ memset(eth->h_dest, 0, dev->addr_len);
+ return(dev->hard_header_len);
+ }
+
+ if(daddr)
+ {
+ memcpy(eth->h_dest,daddr,dev->addr_len);
+ return dev->hard_header_len;
+ }
+
+ return -dev->hard_header_len;
+}
+
+
+/*
+ * Rebuild the Ethernet MAC header. This is called after an ARP
+ * (or in future other address resolution) has completed on this
+ * sk_buff. We now let ARP fill in the other fields.
+ */
+
+int eth_rebuild_header(void *buff, struct device *dev, unsigned long dst,
+ struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)buff;
+
+ /*
+ * Only ARP/IP is currently supported
+ */
+
+ if(eth->h_proto != htons(ETH_P_IP))
+ {
+ printk("eth_rebuild_header: Don't know how to resolve type %d addresses?\n",(int)eth->h_proto);
+ memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
+ return 0;
+ }
+
+ /*
+ * Try and get ARP to resolve the header.
+ */
+#ifdef CONFIG_INET
+ return arp_find(eth->h_dest, dst, dev, dev->pa_addr, skb)? 1 : 0;
+#else
+ return 0;
+#endif
+}
+
+
+/*
+ * Determine the packet's protocol ID. The rule here is that we
+ * assume 802.3 if the type field is short enough to be a length.
+ * This is normal practice and works for any 'now in use' protocol.
+ */
+
+unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev)
+{
+ struct ethhdr *eth = (struct ethhdr *) skb->data;
+ unsigned char *rawp;
+
+ if(*eth->h_dest&1)
+ {
+ if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
+ skb->pkt_type=PACKET_BROADCAST;
+ else
+ skb->pkt_type=PACKET_MULTICAST;
+ }
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ if(memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN))
+ skb->pkt_type=PACKET_OTHERHOST;
+ }
+
+ if (ntohs(eth->h_proto) >= 1536)
+ return eth->h_proto;
+
+ rawp = (unsigned char *)(eth + 1);
+
+ if (*(unsigned short *)rawp == 0xFFFF)
+ return htons(ETH_P_802_3);
+ if (*(unsigned short *)rawp == 0xAAAA)
+ return htons(ETH_P_SNAP);
+
+ return htons(ETH_P_802_2);
+}
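
The rule eth_type_trans() applies above is worth stating in isolation: a type/length field of 1536 or more is an Ethernet II protocol ID and is returned as-is, while smaller values are 802.3 lengths, in which case the first two payload bytes select raw 802.3 (0xFF 0xFF, as used by old Novell IPX), SNAP (0xAA 0xAA) or plain 802.2. A hedged userspace restatement follows; the ETH_P_* pseudo protocol values come from <linux/if_ether.h> and the sample frames are invented.

/*
 * Classify a frame's protocol the way eth_type_trans() above does,
 * from the type/length field and the first payload bytes.
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

static unsigned short classify(unsigned short h_proto_net,
			       const unsigned char *payload)
{
	if (ntohs(h_proto_net) >= 1536)
		return h_proto_net;			/* Ethernet II  */
	if (payload[0] == 0xFF && payload[1] == 0xFF)
		return htons(ETH_P_802_3);		/* raw 802.3    */
	if (payload[0] == 0xAA && payload[1] == 0xAA)
		return htons(ETH_P_SNAP);		/* 802.2 + SNAP */
	return htons(ETH_P_802_2);			/* plain 802.2  */
}

int main(void)
{
	unsigned char novell[] = { 0xFF, 0xFF, 0x00, 0x00 };	/* raw IPX payload  */
	unsigned char llc[]    = { 0x42, 0x42, 0x03, 0x00 };	/* 802.2 LLC header */

	printf("0x%04x\n", ntohs(classify(htons(0x0800), novell)));	/* IP          */
	printf("0x%04x\n", ntohs(classify(htons(60), novell)));	/* ETH_P_802_3 */
	printf("0x%04x\n", ntohs(classify(htons(60), llc)));		/* ETH_P_802_2 */
	return 0;
}
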
diff --git a/net/inet/eth.h b/net/inet/eth.h
new file mode 100644
index 000000000..f8fed44ed
--- /dev/null
+++ b/net/inet/eth.h
@@ -0,0 +1,35 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ETH_H
+#define _ETH_H
+
+
+#include <linux/if_ether.h>
+
+
+extern char *eth_print(unsigned char *ptr);
+extern void eth_dump(struct ethhdr *eth);
+extern int eth_header(unsigned char *buff, struct device *dev,
+ unsigned short type, unsigned long daddr,
+ unsigned long saddr, unsigned len);
+extern int eth_rebuild_header(void *buff, struct device *dev);
+extern void eth_add_arp(unsigned long addr, struct sk_buff *skb,
+ struct device *dev);
+extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev);
+
+#endif /* _ETH_H */
diff --git a/net/inet/icmp.c b/net/inet/icmp.c
new file mode 100644
index 000000000..ad2ac801b
--- /dev/null
+++ b/net/inet/icmp.c
@@ -0,0 +1,748 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Internet Control Message Protocol (ICMP)
+ *
+ * Version: @(#)icmp.c 1.0.11 06/02/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Alan Cox : Generic queue usage.
+ * Gerhard Koerting: ICMP addressing corrected
+ * Alan Cox : Use tos/ttl settings
+ * Alan Cox : Protocol violations
+ * Alan Cox : SNMP Statistics
+ * Alan Cox : Routing errors
+ * Alan Cox : Changes for newer routing code
+ * Alan Cox : Removed old debugging junk
+ * Alan Cox : Fixed the ICMP error status of net/host unreachable
+ * Gerhard Koerting : Fixed broadcast ping properly
+ * Ulrich Kunitz : Fixed ICMP timestamp reply
+ * A.N.Kuznetsov : Multihoming fixes.
+ * Laco Rusnak : Multihoming fixes.
+ * Alan Cox : Tightened up icmp_send().
+ *
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include "snmp.h"
+#include "ip.h"
+#include "route.h"
+#include "protocol.h"
+#include "icmp.h"
+#include "tcp.h"
+#include "snmp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+
+
+#define min(a,b) ((a)<(b)?(a):(b))
+
+
+/*
+ * Statistics
+ */
+
+struct icmp_mib icmp_statistics={0,};
+
+
+/* An array of errno for error messages from dest unreach. */
+struct icmp_err icmp_err_convert[] = {
+ { ENETUNREACH, 0 }, /* ICMP_NET_UNREACH */
+ { EHOSTUNREACH, 0 }, /* ICMP_HOST_UNREACH */
+ { ENOPROTOOPT, 1 }, /* ICMP_PROT_UNREACH */
+ { ECONNREFUSED, 1 }, /* ICMP_PORT_UNREACH */
+ { EOPNOTSUPP, 0 }, /* ICMP_FRAG_NEEDED */
+ { EOPNOTSUPP, 0 }, /* ICMP_SR_FAILED */
+ { ENETUNREACH, 1 }, /* ICMP_NET_UNKNOWN */
+ { EHOSTDOWN, 1 }, /* ICMP_HOST_UNKNOWN */
+ { ENONET, 1 }, /* ICMP_HOST_ISOLATED */
+ { ENETUNREACH, 1 }, /* ICMP_NET_ANO */
+ { EHOSTUNREACH, 1 }, /* ICMP_HOST_ANO */
+ { EOPNOTSUPP, 0 }, /* ICMP_NET_UNR_TOS */
+ { EOPNOTSUPP, 0 } /* ICMP_HOST_UNR_TOS */
+};
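+
+/*
+ * Note: the second field of each entry marks errors that the transport
+ * protocols' error handlers treat as fatal (hard errors) rather than
+ * transient conditions.
+ */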
+
+
+/*
+ * Send an ICMP message in response to a situation
+ *
+ * Fixme: Fragment handling is wrong really.
+ */
+
+void icmp_send(struct sk_buff *skb_in, int type, int code, struct device *dev)
+{
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ int offset;
+ struct icmphdr *icmph;
+ int len;
+ struct device *ndev=NULL; /* Make this =dev to force replies on the same interface */
+ unsigned long our_addr;
+ int atype;
+
+ /*
+ * Find the original IP header.
+ */
+
+ iph = (struct iphdr *) (skb_in->data + dev->hard_header_len);
+
+ /*
+ * No replies to MAC multicast
+ */
+
+ if(skb_in->pkt_type!=PACKET_HOST)
+ return;
+
+ /*
+ * No replies to IP multicasting
+ */
+
+ atype=ip_chk_addr(iph->daddr);
+ if(atype==IS_BROADCAST || IN_MULTICAST(iph->daddr))
+ return;
+
+ /*
+ * Only reply to first fragment.
+ */
+
+ if(ntohs(iph->frag_off)&IP_OFFSET)
+ return;
+
+ /*
+ * We must NEVER NEVER send an ICMP error to an ICMP error message
+ */
+
+ if(type==ICMP_DEST_UNREACH||type==ICMP_REDIRECT||type==ICMP_SOURCE_QUENCH||type==ICMP_TIME_EXCEEDED)
+ {
+ if(iph->protocol==IPPROTO_ICMP)
+ return;
+
+ }
+ icmp_statistics.IcmpOutMsgs++;
+
+ /*
+ * This needs a tidy.
+ */
+
+ switch(type)
+ {
+ case ICMP_DEST_UNREACH:
+ icmp_statistics.IcmpOutDestUnreachs++;
+ break;
+ case ICMP_SOURCE_QUENCH:
+ icmp_statistics.IcmpOutSrcQuenchs++;
+ break;
+ case ICMP_REDIRECT:
+ icmp_statistics.IcmpOutRedirects++;
+ break;
+ case ICMP_ECHO:
+ icmp_statistics.IcmpOutEchos++;
+ break;
+ case ICMP_ECHOREPLY:
+ icmp_statistics.IcmpOutEchoReps++;
+ break;
+ case ICMP_TIME_EXCEEDED:
+ icmp_statistics.IcmpOutTimeExcds++;
+ break;
+ case ICMP_PARAMETERPROB:
+ icmp_statistics.IcmpOutParmProbs++;
+ break;
+ case ICMP_TIMESTAMP:
+ icmp_statistics.IcmpOutTimestamps++;
+ break;
+ case ICMP_TIMESTAMPREPLY:
+ icmp_statistics.IcmpOutTimestampReps++;
+ break;
+ case ICMP_ADDRESS:
+ icmp_statistics.IcmpOutAddrMasks++;
+ break;
+ case ICMP_ADDRESSREPLY:
+ icmp_statistics.IcmpOutAddrMaskReps++;
+ break;
+ }
+ /*
+ * Get some memory for the reply.
+ */
+
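+	/*
+	 *	RFC 792: the error message carries the offending IP header plus
+	 *	the first 64 bits (8 octets) of the original datagram, hence the
+	 *	extra iphdr + 8 below.
+	 */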
+ len = dev->hard_header_len + sizeof(struct iphdr) + sizeof(struct icmphdr) +
+ sizeof(struct iphdr) + 8; /* amount of header to return */
+
+ skb = (struct sk_buff *) alloc_skb(len, GFP_ATOMIC);
+ if (skb == NULL)
+ {
+ icmp_statistics.IcmpOutErrors++;
+ return;
+ }
+ skb->free = 1;
+
+ /*
+ * Build Layer 2-3 headers for message back to source.
+ */
+
+ our_addr = dev->pa_addr;
+ if (iph->daddr != our_addr && ip_chk_addr(iph->daddr) == IS_MYADDR)
+ our_addr = iph->daddr;
+ offset = ip_build_header(skb, our_addr, iph->saddr,
+ &ndev, IPPROTO_ICMP, NULL, len,
+ skb_in->ip_hdr->tos,255);
+ if (offset < 0)
+ {
+ icmp_statistics.IcmpOutErrors++;
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Re-adjust length according to actual IP header size.
+ */
+
+ skb->len = offset + sizeof(struct icmphdr) + sizeof(struct iphdr) + 8;
+
+ /*
+ * Fill in the frame
+ */
+
+ icmph = (struct icmphdr *) (skb->data + offset);
+ icmph->type = type;
+ icmph->code = code;
+ icmph->checksum = 0;
+ icmph->un.gateway = 0;
+ memcpy(icmph + 1, iph, sizeof(struct iphdr) + 8);
+
+ icmph->checksum = ip_compute_csum((unsigned char *)icmph,
+ sizeof(struct icmphdr) + sizeof(struct iphdr) + 8);
+
+ /*
+ * Send it and free it once sent.
+ */
+ ip_queue_xmit(NULL, ndev, skb, 1);
+}
+
+
+/*
+ * Handle ICMP_UNREACH and ICMP_QUENCH.
+ */
+
+static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb)
+{
+ struct inet_protocol *ipprot;
+ struct iphdr *iph;
+ unsigned char hash;
+ int err;
+
+ err = (icmph->type << 8) | icmph->code;
+ iph = (struct iphdr *) (icmph + 1);
+
+ switch(icmph->code & 7)
+ {
+ case ICMP_NET_UNREACH:
+ break;
+ case ICMP_HOST_UNREACH:
+ break;
+ case ICMP_PROT_UNREACH:
+ printk("ICMP: %s:%d: protocol unreachable.\n",
+				in_ntoa(iph->daddr), iph->protocol);
+ break;
+ case ICMP_PORT_UNREACH:
+ break;
+ case ICMP_FRAG_NEEDED:
+ printk("ICMP: %s: fragmentation needed and DF set.\n",
+ in_ntoa(iph->daddr));
+ break;
+ case ICMP_SR_FAILED:
+ printk("ICMP: %s: Source Route Failed.\n", in_ntoa(iph->daddr));
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Get the protocol(s).
+ */
+
+ hash = iph->protocol & (MAX_INET_PROTOS -1);
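+	/*
+	 *	inet_protos[] is indexed by the low bits of the protocol number;
+	 *	entries with colliding hashes are chained via ipprot->next, so
+	 *	the protocol field is re-checked in the loop below.
+	 */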
+
+ /*
+ * This can't change while we are doing it.
+ */
+
+ ipprot = (struct inet_protocol *) inet_protos[hash];
+ while(ipprot != NULL)
+ {
+ struct inet_protocol *nextip;
+
+ nextip = (struct inet_protocol *) ipprot->next;
+
+ /*
+ * Pass it off to everyone who wants it.
+ */
+ if (iph->protocol == ipprot->protocol && ipprot->err_handler)
+ {
+ ipprot->err_handler(err, (unsigned char *)(icmph + 1),
+ iph->daddr, iph->saddr, ipprot);
+ }
+
+ ipprot = nextip;
+ }
+ kfree_skb(skb, FREE_READ);
+}
+
+
+/*
+ * Handle ICMP_REDIRECT.
+ */
+
+static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb,
+ struct device *dev, unsigned long source)
+{
+ struct rtable *rt;
+ struct iphdr *iph;
+ unsigned long ip;
+
+ /*
+ * Get the copied header of the packet that caused the redirect
+ */
+
+ iph = (struct iphdr *) (icmph + 1);
+ ip = iph->daddr;
+
+ switch(icmph->code & 7)
+ {
+ case ICMP_REDIR_NET:
+ /*
+ * This causes a problem with subnetted networks. What we should do
+ * is use ICMP_ADDRESS to get the subnet mask of the problem route
+ * and set both. But we don't..
+ */
+#ifdef not_a_good_idea
+ ip_rt_add((RTF_DYNAMIC | RTF_MODIFIED | RTF_GATEWAY),
+ ip, 0, icmph->un.gateway, dev,0, 0);
+ break;
+#endif
+ case ICMP_REDIR_HOST:
+ /*
+ * Add better route to host.
+ * But first check that the redirect
+ * comes from the old gateway..
+ */
+ rt = ip_rt_route(ip, NULL, NULL);
+ if (!rt)
+ break;
+ if (rt->rt_gateway != source)
+ break;
+ printk("redirect from %s\n", in_ntoa(source));
+ ip_rt_add((RTF_DYNAMIC | RTF_MODIFIED | RTF_HOST | RTF_GATEWAY),
+ ip, 0, icmph->un.gateway, dev,0, 0);
+ break;
+ case ICMP_REDIR_NETTOS:
+ case ICMP_REDIR_HOSTTOS:
+ printk("ICMP: cannot handle TOS redirects yet!\n");
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Discard the original packet
+ */
+
+ kfree_skb(skb, FREE_READ);
+}
+
+
+/*
+ * Handle ICMP_ECHO ("ping") requests.
+ */
+
+static void icmp_echo(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev,
+ unsigned long saddr, unsigned long daddr, int len,
+ struct options *opt)
+{
+ struct icmphdr *icmphr;
+ struct sk_buff *skb2;
+ struct device *ndev=NULL;
+ int size, offset;
+
+ icmp_statistics.IcmpOutEchoReps++;
+ icmp_statistics.IcmpOutMsgs++;
+
+ size = dev->hard_header_len + 64 + len;
+ skb2 = alloc_skb(size, GFP_ATOMIC);
+
+ if (skb2 == NULL)
+ {
+ icmp_statistics.IcmpOutErrors++;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+ skb2->free = 1;
+
+ /* Build Layer 2-3 headers for message back to source */
+ offset = ip_build_header(skb2, daddr, saddr, &ndev,
+ IPPROTO_ICMP, opt, len, skb->ip_hdr->tos,255);
+ if (offset < 0)
+ {
+ icmp_statistics.IcmpOutErrors++;
+ printk("ICMP: Could not build IP Header for ICMP ECHO Response\n");
+ kfree_skb(skb2,FREE_WRITE);
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Re-adjust length according to actual IP header size.
+ */
+
+ skb2->len = offset + len;
+
+ /*
+ * Build ICMP_ECHO Response message.
+ */
+ icmphr = (struct icmphdr *) (skb2->data + offset);
+ memcpy((char *) icmphr, (char *) icmph, len);
+ icmphr->type = ICMP_ECHOREPLY;
+ icmphr->code = 0;
+ icmphr->checksum = 0;
+ icmphr->checksum = ip_compute_csum((unsigned char *)icmphr, len);
+
+ /*
+ * Ship it out - free it when done
+ */
+ ip_queue_xmit((struct sock *)NULL, ndev, skb2, 1);
+
+ /*
+ * Free the received frame
+ */
+
+ kfree_skb(skb, FREE_READ);
+}
+
+/*
+ * Handle ICMP Timestamp requests.
+ */
+
+static void icmp_timestamp(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev,
+ unsigned long saddr, unsigned long daddr, int len,
+ struct options *opt)
+{
+ struct icmphdr *icmphr;
+ struct sk_buff *skb2;
+ int size, offset;
+ unsigned long *timeptr, midtime;
+ struct device *ndev=NULL;
+
+ if (len != 20)
+ {
+		printk("ICMP: Size (%d) of ICMP_TIMESTAMP request should be 20!\n",
+			len);
+ icmp_statistics.IcmpInErrors++;
+#if 1
+ /* correct answers are possible for everything >= 12 */
+ if (len < 12)
+#endif
+ return;
+ }
+
+ size = dev->hard_header_len + 84;
+
+ if (! (skb2 = alloc_skb(size, GFP_ATOMIC)))
+ {
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ icmp_statistics.IcmpOutErrors++;
+ return;
+ }
+ skb2->free = 1;
+
+/*
+ * Build Layer 2-3 headers for message back to source
+ */
+
+ offset = ip_build_header(skb2, daddr, saddr, &ndev, IPPROTO_ICMP, opt, len,
+ skb->ip_hdr->tos, 255);
+ if (offset < 0)
+ {
+ printk("ICMP: Could not build IP Header for ICMP TIMESTAMP Response\n");
+ kfree_skb(skb2, FREE_WRITE);
+ kfree_skb(skb, FREE_READ);
+ icmp_statistics.IcmpOutErrors++;
+ return;
+ }
+
+ /*
+ * Re-adjust length according to actual IP header size.
+ */
+ skb2->len = offset + 20;
+
+ /*
+ * Build ICMP_TIMESTAMP Response message.
+ */
+
+ icmphr = (struct icmphdr *) ((char *) (skb2 + 1) + offset);
+ memcpy((char *) icmphr, (char *) icmph, 12);
+ icmphr->type = ICMP_TIMESTAMPREPLY;
+ icmphr->code = icmphr->checksum = 0;
+
+ /* fill in the current time as ms since midnight UT: */
+ midtime = (xtime.tv_sec % 86400) * 1000 + xtime.tv_usec / 1000;
+ timeptr = (unsigned long *) (icmphr + 1);
+ /*
+ * the originate timestamp (timeptr [0]) is still in the copy:
+ */
+ timeptr [1] = timeptr [2] = htonl(midtime);
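+	/*
+	 *	timeptr[1] is the receive timestamp and timeptr[2] the transmit
+	 *	timestamp; the reply is generated immediately, so both get the
+	 *	same value.
+	 */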
+
+ icmphr->checksum = ip_compute_csum((unsigned char *) icmphr, 20);
+
+ /*
+ * Ship it out - free it when done
+ */
+
+ ip_queue_xmit((struct sock *) NULL, ndev, skb2, 1);
+ icmp_statistics.IcmpOutTimestampReps++;
+ kfree_skb(skb, FREE_READ);
+}
+
+
+
+
+/*
+ * Handle the ICMP INFORMATION REQUEST.
+ */
+
+static void icmp_info(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev,
+ unsigned long saddr, unsigned long daddr, int len,
+ struct options *opt)
+{
+ /* Obsolete */
+ kfree_skb(skb, FREE_READ);
+}
+
+
+/*
+ * Handle ICMP_ADDRESS_MASK requests.
+ */
+
+static void icmp_address(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev,
+ unsigned long saddr, unsigned long daddr, int len,
+ struct options *opt)
+{
+ struct icmphdr *icmphr;
+ struct sk_buff *skb2;
+ int size, offset;
+ struct device *ndev=NULL;
+
+ icmp_statistics.IcmpOutMsgs++;
+ icmp_statistics.IcmpOutAddrMaskReps++;
+
+ size = dev->hard_header_len + 64 + len;
+ skb2 = alloc_skb(size, GFP_ATOMIC);
+ if (skb2 == NULL)
+ {
+ icmp_statistics.IcmpOutErrors++;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+ skb2->free = 1;
+
+ /*
+ * Build Layer 2-3 headers for message back to source
+ */
+
+ offset = ip_build_header(skb2, daddr, saddr, &ndev,
+ IPPROTO_ICMP, opt, len, skb->ip_hdr->tos,255);
+ if (offset < 0)
+ {
+ icmp_statistics.IcmpOutErrors++;
+ printk("ICMP: Could not build IP Header for ICMP ADDRESS Response\n");
+ kfree_skb(skb2,FREE_WRITE);
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Re-adjust length according to actual IP header size.
+ */
+
+ skb2->len = offset + len;
+
+ /*
+ * Build ICMP ADDRESS MASK Response message.
+ */
+
+ icmphr = (struct icmphdr *) (skb2->data + offset);
+ icmphr->type = ICMP_ADDRESSREPLY;
+ icmphr->code = 0;
+ icmphr->checksum = 0;
+ icmphr->un.echo.id = icmph->un.echo.id;
+ icmphr->un.echo.sequence = icmph->un.echo.sequence;
+ memcpy((char *) (icmphr + 1), (char *) &dev->pa_mask, sizeof(dev->pa_mask));
+
+ icmphr->checksum = ip_compute_csum((unsigned char *)icmphr, len);
+
+ /* Ship it out - free it when done */
+ ip_queue_xmit((struct sock *)NULL, ndev, skb2, 1);
+
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+}
+
+
+/*
+ * Deal with incoming ICMP packets.
+ */
+
+int icmp_rcv(struct sk_buff *skb1, struct device *dev, struct options *opt,
+ unsigned long daddr, unsigned short len,
+ unsigned long saddr, int redo, struct inet_protocol *protocol)
+{
+ struct icmphdr *icmph;
+ unsigned char *buff;
+
+ /*
+ * Drop broadcast packets. IP has done a broadcast check and ought one day
+ * to pass on that information.
+ */
+
+ icmp_statistics.IcmpInMsgs++;
+
+
+ /*
+ * Grab the packet as an icmp object
+ */
+
+ buff = skb1->h.raw;
+ icmph = (struct icmphdr *) buff;
+
+ /*
+ * Validate the packet first
+ */
+
+ if (ip_compute_csum((unsigned char *) icmph, len))
+ {
+ /* Failed checksum! */
+ icmp_statistics.IcmpInErrors++;
+ printk("ICMP: failed checksum from %s!\n", in_ntoa(saddr));
+ kfree_skb(skb1, FREE_READ);
+ return(0);
+ }
+
+ /*
+ * Parse the ICMP message
+ */
+
+ if (ip_chk_addr(daddr) == IS_BROADCAST)
+ {
+ if (icmph->type != ICMP_ECHO)
+ {
+ icmp_statistics.IcmpInErrors++;
+ kfree_skb(skb1, FREE_READ);
+ return(0);
+ }
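+		/*
+		 *	A broadcast ping must be answered from our own interface
+		 *	address rather than the broadcast address, so rewrite
+		 *	daddr here (it becomes the source of the reply built in
+		 *	icmp_echo()).
+		 */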
+ daddr=dev->pa_addr;
+ }
+
+ switch(icmph->type)
+ {
+ case ICMP_TIME_EXCEEDED:
+ icmp_statistics.IcmpInTimeExcds++;
+ icmp_unreach(icmph, skb1);
+ return 0;
+ case ICMP_DEST_UNREACH:
+ icmp_statistics.IcmpInDestUnreachs++;
+ icmp_unreach(icmph, skb1);
+ return 0;
+ case ICMP_SOURCE_QUENCH:
+ icmp_statistics.IcmpInSrcQuenchs++;
+ icmp_unreach(icmph, skb1);
+ return(0);
+ case ICMP_REDIRECT:
+ icmp_statistics.IcmpInRedirects++;
+ icmp_redirect(icmph, skb1, dev, saddr);
+ return(0);
+ case ICMP_ECHO:
+ icmp_statistics.IcmpInEchos++;
+ icmp_echo(icmph, skb1, dev, saddr, daddr, len, opt);
+ return 0;
+ case ICMP_ECHOREPLY:
+ icmp_statistics.IcmpInEchoReps++;
+ kfree_skb(skb1, FREE_READ);
+ return(0);
+ case ICMP_TIMESTAMP:
+ icmp_statistics.IcmpInTimestamps++;
+ icmp_timestamp(icmph, skb1, dev, saddr, daddr, len, opt);
+ return 0;
+ case ICMP_TIMESTAMPREPLY:
+ icmp_statistics.IcmpInTimestampReps++;
+ kfree_skb(skb1,FREE_READ);
+ return 0;
+ /* INFO is obsolete and doesn't even feature in the SNMP stats */
+ case ICMP_INFO_REQUEST:
+ icmp_info(icmph, skb1, dev, saddr, daddr, len, opt);
+ return 0;
+ case ICMP_INFO_REPLY:
+ skb1->sk = NULL;
+ kfree_skb(skb1, FREE_READ);
+ return(0);
+ case ICMP_ADDRESS:
+ icmp_statistics.IcmpInAddrMasks++;
+ icmp_address(icmph, skb1, dev, saddr, daddr, len, opt);
+ return 0;
+ case ICMP_ADDRESSREPLY:
+ /*
+ * We ought to set our netmask on receiving this, but
+		 * experience shows it's a waste of effort.
+ */
+ icmp_statistics.IcmpInAddrMaskReps++;
+ kfree_skb(skb1, FREE_READ);
+ return(0);
+ default:
+ icmp_statistics.IcmpInErrors++;
+ kfree_skb(skb1, FREE_READ);
+ return(0);
+ }
+ /*NOTREACHED*/
+ kfree_skb(skb1, FREE_READ);
+ return(-1);
+}
+
+
+/*
+ * Perform any ICMP-related I/O control requests.
+ * [to vanish soon]
+ */
+
+int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ switch(cmd)
+ {
+ default:
+ return(-EINVAL);
+ }
+ return(0);
+}
diff --git a/net/inet/icmp.h b/net/inet/icmp.h
new file mode 100644
index 000000000..1067d8380
--- /dev/null
+++ b/net/inet/icmp.h
@@ -0,0 +1,38 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP module.
+ *
+ * Version: @(#)icmp.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ICMP_H
+#define _ICMP_H
+
+#include <linux/icmp.h>
+
+
+extern struct icmp_err icmp_err_convert[];
+extern struct icmp_mib icmp_statistics;
+
+
+extern void icmp_send(struct sk_buff *skb_in, int type, int code,
+ struct device *dev);
+extern int icmp_rcv(struct sk_buff *skb1, struct device *dev,
+ struct options *opt, unsigned long daddr,
+ unsigned short len, unsigned long saddr,
+ int redo, struct inet_protocol *protocol);
+
+extern int icmp_ioctl(struct sock *sk, int cmd,
+ unsigned long arg);
+
+#endif /* _ICMP_H */
diff --git a/net/inet/ip.c b/net/inet/ip.c
new file mode 100644
index 000000000..fe48ca8eb
--- /dev/null
+++ b/net/inet/ip.c
@@ -0,0 +1,2028 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * The Internet Protocol (IP) module.
+ *
+ * Version: @(#)ip.c 1.0.16b 9/1/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Alan Cox : Commented a couple of minor bits of surplus code
+ * Alan Cox : Undefining IP_FORWARD doesn't include the code
+ * (just stops a compiler warning).
+ * Alan Cox : Frames with >=MAX_ROUTE record routes, strict routes or loose routes
+ * are junked rather than corrupting things.
+ * Alan Cox : Frames to bad broadcast subnets are dumped
+ * We used to process them non broadcast and
+ * boy could that cause havoc.
+ * Alan Cox : ip_forward sets the free flag on the
+ * new frame it queues. Still crap because
+ * it copies the frame but at least it
+ * doesn't eat memory too.
+ * Alan Cox : Generic queue code and memory fixes.
+ * Fred Van Kempen : IP fragment support (borrowed from NET2E)
+ * Gerhard Koerting: Forward fragmented frames correctly.
+ * Gerhard Koerting: Fixes to my fix of the above 8-).
+ * Gerhard Koerting: IP interface addressing fix.
+ * Linus Torvalds : More robustness checks
+ * Alan Cox : Even more checks: Still not as robust as it ought to be
+ * Alan Cox : Save IP header pointer for later
+ * Alan Cox : ip option setting
+ * Alan Cox : Use ip_tos/ip_ttl settings
+ * Alan Cox : Fragmentation bogosity removed
+ * (Thanks to Mark.Bush@prg.ox.ac.uk)
+ * Dmitry Gorodchanin : Send of a raw packet crash fix.
+ * Alan Cox : Silly ip bug when an overlength
+ * fragment turns up. Now frees the
+ * queue.
+ * Linus Torvalds/ : Memory leakage on fragmentation
+ * Alan Cox : handling.
+ * Gerhard Koerting: Forwarding uses IP priority hints
+ * Teemu Rantanen : Fragment problems.
+ * Alan Cox : General cleanup, comments and reformat
+ * Alan Cox : SNMP statistics
+ * Alan Cox : BSD address rule semantics. Also see
+ * UDP as there is a nasty checksum issue
+ * if you do things the wrong way.
+ * Alan Cox : Always defrag, moved IP_FORWARD to the config.in file
+ * Alan Cox : IP options adjust sk->priority.
+ * Pedro Roque : Fix mtu/length error in ip_forward.
+ * Alan Cox : Avoid ip_chk_addr when possible.
+ *
+ * To Fix:
+ * IP option processing is mostly not needed. ip_forward needs to know about routing rules
+ * and time stamp but that's about all. Use the route mtu field here too
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "snmp.h"
+#include "ip.h"
+#include "protocol.h"
+#include "route.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "arp.h"
+#include "icmp.h"
+
+#define CONFIG_IP_DEFRAG
+
+extern int last_retran;
+extern void sort_send(struct sock *sk);
+
+#define min(a,b) ((a)<(b)?(a):(b))
+#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+
+/*
+ * SNMP management statistics
+ */
+
+struct ip_mib ip_statistics={1,64,}; /* Forwarding=Yes, Default TTL=64 */
+
+/*
+ * Handle the issuing of an ioctl() request
+ * for the ip device. This is scheduled to
+ * disappear
+ */
+
+int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ switch(cmd)
+ {
+ default:
+ return(-EINVAL);
+ }
+}
+
+
+/* these two routines will do routing. */
+
+static void
+strict_route(struct iphdr *iph, struct options *opt)
+{
+}
+
+
+static void
+loose_route(struct iphdr *iph, struct options *opt)
+{
+}
+
+
+
+
+/* This routine will check to see if we have lost a gateway. */
+void
+ip_route_check(unsigned long daddr)
+{
+}
+
+
+#if 0
+/* this routine puts the options at the end of an ip header. */
+static int
+build_options(struct iphdr *iph, struct options *opt)
+{
+ unsigned char *ptr;
+ /* currently we don't support any options. */
+ ptr = (unsigned char *)(iph+1);
+ *ptr = 0;
+ return (4);
+}
+#endif
+
+
+/*
+ * Take an skb, and fill in the MAC header.
+ */
+
+static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
+{
+ int mac = 0;
+
+ skb->dev = dev;
+ skb->arp = 1;
+ if (dev->hard_header)
+ {
+ /*
+ * Build a hardware header. Source address is our mac, destination unknown
+ * (rebuild header will sort this out)
+ */
+ mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
+ if (mac < 0)
+ {
+ mac = -mac;
+ skb->arp = 0;
+ skb->raddr = daddr; /* next routing address */
+ }
+ }
+ return mac;
+}
+
+int ip_id_count = 0;
+
+/*
+ * This routine builds the appropriate hardware/IP headers for
+ * the routine. It assumes that if *dev != NULL then the
+ * protocol knows what it's doing, otherwise it uses the
+ * routing/ARP tables to select a device struct.
+ */
+int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
+ struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
+{
+ static struct options optmem;
+ struct iphdr *iph;
+ struct rtable *rt;
+ unsigned char *buff;
+ unsigned long raddr;
+ int tmp;
+ unsigned long src;
+
+ buff = skb->data;
+
+ /*
+ * See if we need to look up the device.
+ */
+
+ if (*dev == NULL)
+ {
+ if(skb->localroute)
+ rt = ip_rt_local(daddr, &optmem, &src);
+ else
+ rt = ip_rt_route(daddr, &optmem, &src);
+ if (rt == NULL)
+ {
+ ip_statistics.IpOutNoRoutes++;
+ return(-ENETUNREACH);
+ }
+
+ *dev = rt->rt_dev;
+ /*
+ * If the frame is from us and going off machine it MUST MUST MUST
+ * have the output device ip address and never the loopback
+ */
+ if (LOOPBACK(saddr) && !LOOPBACK(daddr))
+ saddr = src;/*rt->rt_dev->pa_addr;*/
+ raddr = rt->rt_gateway;
+
+ opt = &optmem;
+ }
+ else
+ {
+ /*
+ * We still need the address of the first hop.
+ */
+ if(skb->localroute)
+ rt = ip_rt_local(daddr, &optmem, &src);
+ else
+ rt = ip_rt_route(daddr, &optmem, &src);
+ /*
+ * If the frame is from us and going off machine it MUST MUST MUST
+ * have the output device ip address and never the loopback
+ */
+ if (LOOPBACK(saddr) && !LOOPBACK(daddr))
+ saddr = src;/*rt->rt_dev->pa_addr;*/
+
+ raddr = (rt == NULL) ? 0 : rt->rt_gateway;
+ }
+
+ /*
+ * No source addr so make it our addr
+ */
+ if (saddr == 0)
+ saddr = src;
+
+ /*
+ * No gateway so aim at the real destination
+ */
+ if (raddr == 0)
+ raddr = daddr;
+
+ /*
+ * Now build the MAC header.
+ */
+
+ tmp = ip_send(skb, raddr, len, *dev, saddr);
+ buff += tmp;
+ len -= tmp;
+
+ /*
+ * Book keeping
+ */
+
+ skb->dev = *dev;
+ skb->saddr = saddr;
+ if (skb->sk)
+ skb->sk->saddr = saddr;
+
+ /*
+ * Now build the IP header.
+ */
+
+ /*
+ * If we are using IPPROTO_RAW, then we don't need an IP header, since
+ * one is being supplied to us by the user
+ */
+
+ if(type == IPPROTO_RAW)
+ return (tmp);
+
+ iph = (struct iphdr *)buff;
+ iph->version = 4;
+ iph->tos = tos;
+ iph->frag_off = 0;
+ iph->ttl = ttl;
+ iph->daddr = daddr;
+ iph->saddr = saddr;
+ iph->protocol = type;
+ iph->ihl = 5;
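+	/*
+	 *	The total length, id and header checksum are left for
+	 *	ip_queue_xmit() to fill in once the final frame length is known.
+	 */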
+
+ /* Setup the IP options. */
+#ifdef Not_Yet_Avail
+ build_options(iph, opt);
+#endif
+
+ return(20 + tmp); /* IP header plus MAC header size */
+}
+
+
+static int
+do_options(struct iphdr *iph, struct options *opt)
+{
+ unsigned char *buff;
+ int done = 0;
+ int i, len = sizeof(struct iphdr);
+
+ /* Zero out the options. */
+ opt->record_route.route_size = 0;
+ opt->loose_route.route_size = 0;
+ opt->strict_route.route_size = 0;
+ opt->tstamp.ptr = 0;
+ opt->security = 0;
+ opt->compartment = 0;
+ opt->handling = 0;
+ opt->stream = 0;
+ opt->tcc = 0;
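+
+	/*
+	 *	Option parsing is currently disabled: we always return here and
+	 *	treat the datagram as optionless. The parser below is unreachable
+	 *	and kept only for reference.
+	 */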
+ return(0);
+
+ /* Advance the pointer to start at the options. */
+ buff = (unsigned char *)(iph + 1);
+
+ /* Now start the processing. */
+ while (!done && len < iph->ihl*4) switch(*buff) {
+ case IPOPT_END:
+ done = 1;
+ break;
+ case IPOPT_NOOP:
+ buff++;
+ len++;
+ break;
+ case IPOPT_SEC:
+ buff++;
+ if (*buff != 11) return(1);
+ buff++;
+ opt->security = ntohs(*(unsigned short *)buff);
+ buff += 2;
+ opt->compartment = ntohs(*(unsigned short *)buff);
+ buff += 2;
+ opt->handling = ntohs(*(unsigned short *)buff);
+ buff += 2;
+ opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
+ buff += 3;
+ len += 11;
+ break;
+ case IPOPT_LSRR:
+ buff++;
+ if ((*buff - 3)% 4 != 0) return(1);
+ len += *buff;
+ opt->loose_route.route_size = (*buff -3)/4;
+ buff++;
+ if (*buff % 4 != 0) return(1);
+ opt->loose_route.pointer = *buff/4 - 1;
+ buff++;
+ buff++;
+ for (i = 0; i < opt->loose_route.route_size; i++) {
+ if(i>=MAX_ROUTE)
+ return(1);
+ opt->loose_route.route[i] = *(unsigned long *)buff;
+ buff += 4;
+ }
+ break;
+ case IPOPT_SSRR:
+ buff++;
+ if ((*buff - 3)% 4 != 0) return(1);
+ len += *buff;
+ opt->strict_route.route_size = (*buff -3)/4;
+ buff++;
+ if (*buff % 4 != 0) return(1);
+ opt->strict_route.pointer = *buff/4 - 1;
+ buff++;
+ buff++;
+ for (i = 0; i < opt->strict_route.route_size; i++) {
+ if(i>=MAX_ROUTE)
+ return(1);
+ opt->strict_route.route[i] = *(unsigned long *)buff;
+ buff += 4;
+ }
+ break;
+ case IPOPT_RR:
+ buff++;
+ if ((*buff - 3)% 4 != 0) return(1);
+ len += *buff;
+ opt->record_route.route_size = (*buff -3)/4;
+ buff++;
+ if (*buff % 4 != 0) return(1);
+ opt->record_route.pointer = *buff/4 - 1;
+ buff++;
+ buff++;
+ for (i = 0; i < opt->record_route.route_size; i++) {
+ if(i>=MAX_ROUTE)
+ return 1;
+ opt->record_route.route[i] = *(unsigned long *)buff;
+ buff += 4;
+ }
+ break;
+ case IPOPT_SID:
+ len += 4;
+ buff +=2;
+ opt->stream = *(unsigned short *)buff;
+ buff += 2;
+ break;
+ case IPOPT_TIMESTAMP:
+ buff++;
+ len += *buff;
+ if (*buff % 4 != 0) return(1);
+ opt->tstamp.len = *buff / 4 - 1;
+ buff++;
+ if ((*buff - 1) % 4 != 0) return(1);
+ opt->tstamp.ptr = (*buff-1)/4;
+ buff++;
+ opt->tstamp.x.full_char = *buff;
+ buff++;
+ for (i = 0; i < opt->tstamp.len; i++) {
+ opt->tstamp.data[i] = *(unsigned long *)buff;
+ buff += 4;
+ }
+ break;
+ default:
+ return(1);
+ }
+
+ if (opt->record_route.route_size == 0) {
+ if (opt->strict_route.route_size != 0) {
+ memcpy(&(opt->record_route), &(opt->strict_route),
+ sizeof(opt->record_route));
+ } else if (opt->loose_route.route_size != 0) {
+ memcpy(&(opt->record_route), &(opt->loose_route),
+ sizeof(opt->record_route));
+ }
+ }
+
+ if (opt->strict_route.route_size != 0 &&
+ opt->strict_route.route_size != opt->strict_route.pointer) {
+ strict_route(iph, opt);
+ return(0);
+ }
+
+ if (opt->loose_route.route_size != 0 &&
+ opt->loose_route.route_size != opt->loose_route.pointer) {
+ loose_route(iph, opt);
+ return(0);
+ }
+
+ return(0);
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers, which
+ * always checksum on 4 octet boundaries.
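+ *
+ * The 'wlen' argument is the header length in 32 bit words (iph->ihl),
+ * as passed by ip_csum() and ip_send_check().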
+ */
+
+static inline unsigned short ip_fast_csum(unsigned char * buff, int wlen)
+{
+ unsigned long sum = 0;
+
+ if (wlen)
+ {
+ unsigned long bogus;
+ __asm__("clc\n"
+ "1:\t"
+ "lodsl\n\t"
+ "adcl %3, %0\n\t"
+ "decl %2\n\t"
+ "jne 1b\n\t"
+ "adcl $0, %0\n\t"
+ "movl %0, %3\n\t"
+ "shrl $16, %3\n\t"
+ "addw %w3, %w0\n\t"
+ "adcw $0, %w0"
+ : "=r" (sum), "=S" (buff), "=r" (wlen), "=a" (bogus)
+ : "0" (sum), "1" (buff), "2" (wlen));
+ }
+ return (~sum) & 0xffff;
+}
+
+/*
+ * This routine does all the checksum computations that don't
+ * require anything special (like copying or special headers).
+ */
+
+unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+ unsigned long sum = 0;
+
+ /* Do the first multiple of 4 bytes and convert to 16 bits. */
+ if (len > 3)
+ {
+ __asm__("clc\n"
+ "1:\t"
+ "lodsl\n\t"
+ "adcl %%eax, %%ebx\n\t"
+ "loop 1b\n\t"
+ "adcl $0, %%ebx\n\t"
+ "movl %%ebx, %%eax\n\t"
+ "shrl $16, %%eax\n\t"
+ "addw %%ax, %%bx\n\t"
+ "adcw $0, %%bx"
+ : "=b" (sum) , "=S" (buff)
+ : "0" (sum), "c" (len >> 2) ,"1" (buff)
+ : "ax", "cx", "si", "bx" );
+ }
+ if (len & 2)
+ {
+ __asm__("lodsw\n\t"
+ "addw %%ax, %%bx\n\t"
+ "adcw $0, %%bx"
+ : "=b" (sum), "=S" (buff)
+ : "0" (sum), "1" (buff)
+ : "bx", "ax", "si");
+ }
+ if (len & 1)
+ {
+ __asm__("lodsb\n\t"
+ "movb $0, %%ah\n\t"
+ "addw %%ax, %%bx\n\t"
+ "adcw $0, %%bx"
+ : "=b" (sum), "=S" (buff)
+ : "0" (sum), "1" (buff)
+ : "bx", "ax", "si");
+ }
+ sum =~sum;
+ return(sum & 0xffff);
+}
+
+/*
+ * Check the header of an incoming IP datagram. This version is still used in slhc.c.
+ */
+
+int ip_csum(struct iphdr *iph)
+{
+ return ip_fast_csum((unsigned char *)iph, iph->ihl);
+}
+
+/*
+ * Generate a checksum for an outgoing IP datagram.
+ */
+
+static void ip_send_check(struct iphdr *iph)
+{
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+}
+
+/************************ Fragment Handlers From NET2E not yet with tweaks to beat 4K **********************************/
+
+
+/*
+ * This fragment handler is a bit of a heap. On the other hand it works quite
+ * happily and handles things quite well.
+ */
+
+static struct ipq *ipqueue = NULL; /* IP fragment queue */
+
+/*
+ * Create a new fragment entry.
+ */
+
+static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
+{
+ struct ipfrag *fp;
+
+ fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
+ if (fp == NULL)
+ {
+ printk("IP: frag_create: no memory left !\n");
+ return(NULL);
+ }
+ memset(fp, 0, sizeof(struct ipfrag));
+
+ /* Fill in the structure. */
+ fp->offset = offset;
+ fp->end = end;
+ fp->len = end - offset;
+ fp->skb = skb;
+ fp->ptr = ptr;
+
+ return(fp);
+}
+
+
+/*
+ * Find the correct entry in the "incomplete datagrams" queue for
+ * this IP datagram, and return the queue entry address if found.
+ */
+
+static struct ipq *ip_find(struct iphdr *iph)
+{
+ struct ipq *qp;
+ struct ipq *qplast;
+
+ cli();
+ qplast = NULL;
+ for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
+ {
+ if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
+ iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
+ {
+ del_timer(&qp->timer); /* So it doesn't vanish on us. The timer will be reset anyway */
+ sti();
+ return(qp);
+ }
+ }
+ sti();
+ return(NULL);
+}
+
+
+/*
+ * Remove an entry from the "incomplete datagrams" queue, either
+ * because we completed, reassembled and processed it, or because
+ * it timed out.
+ */
+
+static void ip_free(struct ipq *qp)
+{
+ struct ipfrag *fp;
+ struct ipfrag *xp;
+
+ /*
+ * Stop the timer for this entry.
+ */
+
+ del_timer(&qp->timer);
+
+ /* Remove this entry from the "incomplete datagrams" queue. */
+ cli();
+ if (qp->prev == NULL)
+ {
+ ipqueue = qp->next;
+ if (ipqueue != NULL)
+ ipqueue->prev = NULL;
+ }
+ else
+ {
+ qp->prev->next = qp->next;
+ if (qp->next != NULL)
+ qp->next->prev = qp->prev;
+ }
+
+ /* Release all fragment data. */
+
+ fp = qp->fragments;
+ while (fp != NULL)
+ {
+ xp = fp->next;
+ IS_SKB(fp->skb);
+ kfree_skb(fp->skb,FREE_READ);
+ kfree_s(fp, sizeof(struct ipfrag));
+ fp = xp;
+ }
+
+ /* Release the MAC header. */
+ kfree_s(qp->mac, qp->maclen);
+
+ /* Release the IP header. */
+ kfree_s(qp->iph, qp->ihlen + 8);
+
+ /* Finally, release the queue descriptor itself. */
+ kfree_s(qp, sizeof(struct ipq));
+ sti();
+}
+
+
+/*
+ * Oops- a fragment queue timed out. Kill it and send an ICMP reply.
+ */
+
+static void ip_expire(unsigned long arg)
+{
+ struct ipq *qp;
+
+ qp = (struct ipq *)arg;
+
+ /*
+ * Send an ICMP "Fragment Reassembly Timeout" message.
+ */
+
+ ip_statistics.IpReasmTimeout++;
+ ip_statistics.IpReasmFails++;
+ /* This if is always true... shrug */
+ if(qp->fragments!=NULL)
+ icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
+ ICMP_EXC_FRAGTIME, qp->dev);
+
+ /*
+ * Nuke the fragment queue.
+ */
+ ip_free(qp);
+}
+
+
+/*
+ * Add an entry to the 'ipq' queue for a newly received IP datagram.
+ * We will (hopefully :-) receive all other fragments of this datagram
+ * in time, so we just create a queue for this datagram, in which we
+ * will insert the received fragments at their respective positions.
+ */
+
+static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
+{
+ struct ipq *qp;
+ int maclen;
+ int ihlen;
+
+ qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
+ if (qp == NULL)
+ {
+ printk("IP: create: no memory left !\n");
+		return(NULL);
+ }
+ memset(qp, 0, sizeof(struct ipq));
+
+ /*
+ * Allocate memory for the MAC header.
+ *
+	 * FIXME: We have a maximum MAC header size #define'd elsewhere.
+	 * We should use it here and avoid the 3 kmalloc() calls.
+ */
+
+ maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
+ qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
+ if (qp->mac == NULL)
+ {
+ printk("IP: create: no memory left !\n");
+ kfree_s(qp, sizeof(struct ipq));
+ return(NULL);
+ }
+
+ /*
+ * Allocate memory for the IP header (plus 8 octets for ICMP).
+ */
+
+ ihlen = (iph->ihl * sizeof(unsigned long));
+ qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
+ if (qp->iph == NULL)
+ {
+ printk("IP: create: no memory left !\n");
+ kfree_s(qp->mac, maclen);
+ kfree_s(qp, sizeof(struct ipq));
+ return(NULL);
+ }
+
+ /* Fill in the structure. */
+ memcpy(qp->mac, skb->data, maclen);
+ memcpy(qp->iph, iph, ihlen + 8);
+ qp->len = 0;
+ qp->ihlen = ihlen;
+ qp->maclen = maclen;
+ qp->fragments = NULL;
+ qp->dev = dev;
+
+ /* Start a timer for this entry. */
+ qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */
+ qp->timer.data = (unsigned long) qp; /* pointer to queue */
+ qp->timer.function = ip_expire; /* expire function */
+ add_timer(&qp->timer);
+
+ /* Add this entry to the queue. */
+ qp->prev = NULL;
+ cli();
+ qp->next = ipqueue;
+ if (qp->next != NULL)
+ qp->next->prev = qp;
+ ipqueue = qp;
+ sti();
+ return(qp);
+}
+
+
+/*
+ * See if a fragment queue is complete.
+ */
+
+static int ip_done(struct ipq *qp)
+{
+ struct ipfrag *fp;
+ int offset;
+
+ /* Only possible if we received the final fragment. */
+ if (qp->len == 0)
+ return(0);
+
+ /* Check all fragment offsets to see if they connect. */
+ fp = qp->fragments;
+ offset = 0;
+ while (fp != NULL)
+ {
+ if (fp->offset > offset)
+ return(0); /* fragment(s) missing */
+ offset = fp->end;
+ fp = fp->next;
+ }
+
+ /* All fragments are present. */
+ return(1);
+}
+
+
+/*
+ * Build a new IP datagram from all its fragments.
+ *
+ * FIXME: We copy here because we lack an effective way of handling lists
+ * of bits on input. Until the new skb data handling is in I'm not going
+ * to touch this with a bargepole. This also causes a 4Kish limit on
+ * packet sizes.
+ */
+
+static struct sk_buff *ip_glue(struct ipq *qp)
+{
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct ipfrag *fp;
+ unsigned char *ptr;
+ int count, len;
+
+ /*
+ * Allocate a new buffer for the datagram.
+ */
+
+ len = qp->maclen + qp->ihlen + qp->len;
+
+ if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
+ {
+ ip_statistics.IpReasmFails++;
+ printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
+ ip_free(qp);
+ return(NULL);
+ }
+
+ /* Fill in the basic details. */
+ skb->len = (len - qp->maclen);
+ skb->h.raw = skb->data;
+ skb->free = 1;
+
+ /* Copy the original MAC and IP headers into the new buffer. */
+ ptr = (unsigned char *) skb->h.raw;
+ memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
+ ptr += qp->maclen;
+ memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
+ ptr += qp->ihlen;
+ skb->h.raw += qp->maclen;
+
+ count = 0;
+
+ /* Copy the data portions of all fragments into the new buffer. */
+ fp = qp->fragments;
+ while(fp != NULL)
+ {
+ if(count+fp->len>skb->len)
+ {
+ printk("Invalid fragment list: Fragment over size.\n");
+ ip_free(qp);
+ kfree_skb(skb,FREE_WRITE);
+ ip_statistics.IpReasmFails++;
+ return NULL;
+ }
+ memcpy((ptr + fp->offset), fp->ptr, fp->len);
+ count += fp->len;
+ fp = fp->next;
+ }
+
+ /* We glued together all fragments, so remove the queue entry. */
+ ip_free(qp);
+
+ /* Done with all fragments. Fixup the new IP header. */
+ iph = skb->h.iph;
+ iph->frag_off = 0;
+ iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
+ skb->ip_hdr = iph;
+
+ ip_statistics.IpReasmOKs++;
+ return(skb);
+}
+
+
+/*
+ * Process an incoming IP datagram fragment.
+ */
+
+static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
+{
+ struct ipfrag *prev, *next;
+ struct ipfrag *tfp;
+ struct ipq *qp;
+ struct sk_buff *skb2;
+ unsigned char *ptr;
+ int flags, offset;
+ int i, ihl, end;
+
+ ip_statistics.IpReasmReqds++;
+
+ /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
+ qp = ip_find(iph);
+
+ /* Is this a non-fragmented datagram? */
+ offset = ntohs(iph->frag_off);
+ flags = offset & ~IP_OFFSET;
+ offset &= IP_OFFSET;
+ if (((flags & IP_MF) == 0) && (offset == 0))
+ {
+ if (qp != NULL)
+ ip_free(qp); /* Huh? How could this exist?? */
+ return(skb);
+ }
+
+ offset <<= 3; /* offset is in 8-byte chunks */
+
+ /*
+ * If the queue already existed, keep restarting its timer as long
+ * as we still are receiving fragments. Otherwise, create a fresh
+ * queue entry.
+ */
+
+ if (qp != NULL)
+ {
+ del_timer(&qp->timer);
+ qp->timer.expires = IP_FRAG_TIME; /* about 30 seconds */
+ qp->timer.data = (unsigned long) qp; /* pointer to queue */
+ qp->timer.function = ip_expire; /* expire function */
+ add_timer(&qp->timer);
+ }
+ else
+ {
+ /*
+ * If we failed to create it, then discard the frame
+ */
+ if ((qp = ip_create(skb, iph, dev)) == NULL)
+ {
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ ip_statistics.IpReasmFails++;
+ return NULL;
+ }
+ }
+
+ /*
+ * Determine the position of this fragment.
+ */
+
+ ihl = (iph->ihl * sizeof(unsigned long));
+ end = offset + ntohs(iph->tot_len) - ihl;
+
+ /*
+ * Point into the IP datagram 'data' part.
+ */
+
+ ptr = skb->data + dev->hard_header_len + ihl;
+
+ /*
+ * Is this the final fragment?
+ */
+
+ if ((flags & IP_MF) == 0)
+ qp->len = end;
+
+ /*
+ * Find out which fragments are in front and at the back of us
+ * in the chain of fragments so far. We must know where to put
+ * this fragment, right?
+ */
+
+ prev = NULL;
+ for(next = qp->fragments; next != NULL; next = next->next)
+ {
+ if (next->offset > offset)
+ break; /* bingo! */
+ prev = next;
+ }
+
+ /*
+ * We found where to put this one.
+ * Check for overlap with preceding fragment, and, if needed,
+ * align things so that any overlaps are eliminated.
+ */
+ if (prev != NULL && offset < prev->end)
+ {
+ i = prev->end - offset;
+ offset += i; /* ptr into datagram */
+ ptr += i; /* ptr into fragment data */
+ }
+
+ /*
+ * Look for overlap with succeeding segments.
+ * If we can merge fragments, do it.
+ */
+
+ for(; next != NULL; next = tfp)
+ {
+ tfp = next->next;
+ if (next->offset >= end)
+ break; /* no overlaps at all */
+
+ i = end - next->offset; /* overlap is 'i' bytes */
+ next->len -= i; /* so reduce size of */
+ next->offset += i; /* next fragment */
+ next->ptr += i;
+
+ /*
+ * If we get a frag size of <= 0, remove it and the packet
+ * that it goes with.
+ */
+ if (next->len <= 0)
+ {
+ if (next->prev != NULL)
+ next->prev->next = next->next;
+ else
+ qp->fragments = next->next;
+
+			if (next->next != NULL)
+ next->next->prev = next->prev;
+
+ kfree_skb(next->skb,FREE_READ);
+ kfree_s(next, sizeof(struct ipfrag));
+ }
+ }
+
+ /*
+ * Insert this fragment in the chain of fragments.
+ */
+
+ tfp = NULL;
+ tfp = ip_frag_create(offset, end, skb, ptr);
+
+ /*
+ * No memory to save the fragment - so throw the lot
+ */
+
+ if (!tfp)
+ {
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ return NULL;
+ }
+ tfp->prev = prev;
+ tfp->next = next;
+ if (prev != NULL)
+ prev->next = tfp;
+ else
+ qp->fragments = tfp;
+
+ if (next != NULL)
+ next->prev = tfp;
+
+ /*
+ * OK, so we inserted this new fragment into the chain.
+ * Check if we now have a full IP datagram which we can
+ * bump up to the IP layer...
+ */
+
+ if (ip_done(qp))
+ {
+ skb2 = ip_glue(qp); /* glue together the fragments */
+ return(skb2);
+ }
+ return(NULL);
+}
+
+
+/*
+ * This IP datagram is too large to be sent in one piece. Break it up into
+ * smaller pieces (each of size equal to the MAC header plus IP header plus
+ * a block of the data of the original IP data part) that will yet fit in a
+ * single device frame, and queue such a frame for sending by calling the
+ * ip_queue_xmit(). Note that this is recursion, and bad things will happen
+ * if this function causes a loop...
+ *
+ * Yes this is inefficient, feel free to submit a quicker one.
+ *
+ * **Protocol Violation**
+ * We copy all the options to each fragment. !FIXME!
+ */
+void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
+{
+ struct iphdr *iph;
+ unsigned char *raw;
+ unsigned char *ptr;
+ struct sk_buff *skb2;
+ int left, mtu, hlen, len;
+ int offset;
+ unsigned long flags;
+
+ /*
+ * Point into the IP datagram header.
+ */
+
+ raw = skb->data;
+ iph = (struct iphdr *) (raw + dev->hard_header_len);
+
+ skb->ip_hdr = iph;
+
+ /*
+ * Setup starting values.
+ */
+
+ hlen = (iph->ihl * sizeof(unsigned long));
+ left = ntohs(iph->tot_len) - hlen; /* Space per frame */
+ hlen += dev->hard_header_len; /* Total header size */
+ mtu = (dev->mtu - hlen); /* Size of data space */
+ ptr = (raw + hlen); /* Where to start from */
+
+ /*
+ * Check for any "DF" flag. [DF means do not fragment]
+ */
+
+ if (ntohs(iph->frag_off) & IP_DF)
+ {
+ ip_statistics.IpFragFails++;
+ icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev);
+ return;
+ }
+
+ /*
+	 * The protocol doesn't seem to say what to do when the frame plus
+	 * options doesn't fit the MTU. The old code simply fell over dead
+	 * in this case, so we were fortunate it never happened.
+ */
+
+ if(mtu<8)
+ {
+		/* It's wrong but it's better than nothing */
+ icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev);
+ ip_statistics.IpFragFails++;
+ return;
+ }
+
+ /*
+ * Fragment the datagram.
+ */
+
+ /*
+ * The initial offset is 0 for a complete frame. When
+	 * fragmenting fragments it's wherever this one starts.
+ */
+
+ if (is_frag & 2)
+ offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
+ else
+ offset = 0;
+
+
+ /*
+ * Keep copying data until we run out.
+ */
+
+ while(left > 0)
+ {
+ len = left;
+ /* IF: it doesn't fit, use 'mtu' - the data space left */
+ if (len > mtu)
+ len = mtu;
+		/* IF: we are not sending up to and including the packet end
+ then align the next start on an eight byte boundary */
+ if (len < left)
+ {
+ len/=8;
+ len*=8;
+ }
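+		/*
+		 *	The frag_off field counts in units of 8 octets, so every
+		 *	fragment except the last must carry a multiple of 8 data
+		 *	bytes - hence the rounding above.
+		 */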
+ /*
+ * Allocate buffer.
+ */
+
+ if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
+ {
+ printk("IP: frag: no memory for new fragment!\n");
+ ip_statistics.IpFragFails++;
+ return;
+ }
+
+ /*
+ * Set up data on packet
+ */
+
+ skb2->arp = skb->arp;
+ if(skb->free==0)
+ printk("IP fragmenter: BUG free!=1 in fragmenter\n");
+ skb2->free = 1;
+ skb2->len = len + hlen;
+ skb2->h.raw=(char *) skb2->data;
+ /*
+ * Charge the memory for the fragment to any owner
+ * it might possess
+ */
+
+ save_flags(flags);
+ if (sk)
+ {
+ cli();
+ sk->wmem_alloc += skb2->mem_len;
+ skb2->sk=sk;
+ }
+ restore_flags(flags);
+ skb2->raddr = skb->raddr; /* For rebuild_header - must be here */
+
+ /*
+ * Copy the packet header into the new buffer.
+ */
+
+ memcpy(skb2->h.raw, raw, hlen);
+
+ /*
+ * Copy a block of the IP datagram.
+ */
+ memcpy(skb2->h.raw + hlen, ptr, len);
+ left -= len;
+
+ skb2->h.raw+=dev->hard_header_len;
+
+ /*
+ * Fill in the new header fields.
+ */
+ iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
+ iph->frag_off = htons((offset >> 3));
+ /*
+		 * Added AC : If we are fragmenting a fragment that's not the
+		 * last fragment then keep the MF bit set on each piece
+ */
+ if (left > 0 || (is_frag & 1))
+ iph->frag_off |= htons(IP_MF);
+ ptr += len;
+ offset += len;
+
+ /*
+ * Put this fragment into the sending queue.
+ */
+
+ ip_statistics.IpFragCreates++;
+
+ ip_queue_xmit(sk, dev, skb2, 2);
+ }
+ ip_statistics.IpFragOKs++;
+}
+
+
+
+#ifdef CONFIG_IP_FORWARD
+
+/*
+ * Forward an IP datagram to its next destination.
+ */
+
+static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
+{
+ struct device *dev2; /* Output device */
+ struct iphdr *iph; /* Our header */
+ struct sk_buff *skb2; /* Output packet */
+ struct rtable *rt; /* Route we use */
+ unsigned char *ptr; /* Data pointer */
+ unsigned long raddr; /* Router IP address */
+
+ /*
+ * According to the RFC, we must first decrease the TTL field. If
+ * that reaches zero, we must reply an ICMP control message telling
+ * that the packet's lifetime expired.
+ *
+ * Exception:
+ * We may not generate an ICMP for an ICMP. icmp_send does the
+ * enforcement of this so we can forget it here. It is however
+ * sometimes VERY important.
+ */
+
+ iph = skb->h.iph;
+ iph->ttl--;
+ if (iph->ttl <= 0)
+ {
+ /* Tell the sender its packet died... */
+ icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, dev);
+ return;
+ }
+
+ /*
+ * Re-compute the IP header checksum.
+ * This is inefficient. We know what has happened to the header
+ * and could thus adjust the checksum as Phil Karn does in KA9Q
+ */
+
+ ip_send_check(iph);
+
+ /*
+ * OK, the packet is still valid. Fetch its destination address,
+ * and give it to the IP sender for further processing.
+ */
+
+ rt = ip_rt_route(iph->daddr, NULL, NULL);
+ if (rt == NULL)
+ {
+ /*
+ * Tell the sender its packet cannot be delivered. Again
+ * ICMP is screened later.
+ */
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, dev);
+ return;
+ }
+
+
+ /*
+ * Gosh. Not only is the packet valid; we even know how to
+ * forward it onto its final destination. Can we say this
+ * is being plain lucky?
+ * If the router told us that there is no GW, use the dest.
+ * IP address itself- we seem to be connected directly...
+ */
+
+ raddr = rt->rt_gateway;
+
+ if (raddr != 0)
+ {
+ /*
+ * There is a gateway so find the correct route for it.
+ * Gateways cannot in turn be gatewayed.
+ */
+ rt = ip_rt_route(raddr, NULL, NULL);
+ if (rt == NULL)
+ {
+ /*
+ * Tell the sender its packet cannot be delivered...
+ */
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, dev);
+ return;
+ }
+ if (rt->rt_gateway != 0)
+ raddr = rt->rt_gateway;
+ }
+ else
+ raddr = iph->daddr;
+
+ /*
+ * Having picked a route we can now send the frame out.
+ */
+
+ dev2 = rt->rt_dev;
+
+ /*
+ * In IP you never forward a frame on the interface that it arrived
+ * upon. We should generate an ICMP HOST REDIRECT giving the route
+ * we calculated.
+ * For now just dropping the packet is an acceptable compromise.
+ */
+
+ if (dev == dev2)
+ return;
+
+ /*
+ * We now allocate a new buffer, and copy the datagram into it.
+ * If the indicated interface is up and running, kick it.
+ */
+
+ if (dev2->flags & IFF_UP)
+ {
+
+ /*
+ * Current design decrees we copy the packet. For identical header
+ * lengths we could avoid it. The new skb code will let us push
+ * data so the problem goes away then.
+ */
+
+ skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
+ /*
+ * This is rare and since IP is tolerant of network failures
+ * quite harmless.
+ */
+ if (skb2 == NULL)
+ {
+ printk("\nIP: No memory available for IP forward\n");
+ return;
+ }
+ ptr = skb2->data;
+ skb2->free = 1;
+ skb2->len = skb->len + dev2->hard_header_len;
+ skb2->h.raw = ptr;
+
+ /*
+ * Copy the packet data into the new buffer.
+ */
+ memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
+
+ /* Now build the MAC header. */
+ (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
+
+ ip_statistics.IpForwDatagrams++;
+
+ /*
+ * See if it needs fragmenting. Note in ip_rcv we tagged
+ * the fragment type. This must be right so that
+ * the fragmenter does the right thing.
+ */
+
+ if(skb2->len > dev2->mtu + dev2->hard_header_len)
+ {
+ ip_fragment(NULL,skb2,dev2, is_frag);
+ kfree_skb(skb2,FREE_WRITE);
+ }
+ else
+ {
+ /*
+ * Map service types to priority. We lie about
+				 * throughput being low priority, but it's a good
+ * choice to help improve general usage.
+ */
+ if(iph->tos & IPTOS_LOWDELAY)
+ dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
+ else if(iph->tos & IPTOS_THROUGHPUT)
+ dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
+ else
+ dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
+ }
+ }
+}
+
+
+#endif
+
+/*
+ * This function receives all incoming IP datagrams.
+ */
+
+int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
+{
+ struct iphdr *iph = skb->h.iph;
+ unsigned char hash;
+ unsigned char flag = 0;
+ unsigned char opts_p = 0; /* Set iff the packet has options. */
+ struct inet_protocol *ipprot;
+ static struct options opt; /* since we don't use these yet, and they
+ take up stack space. */
+ int brd=IS_MYADDR;
+ int is_frag=0;
+
+
+ ip_statistics.IpInReceives++;
+
+ /*
+ * Tag the ip header of this packet so we can find it
+ */
+
+ skb->ip_hdr = iph;
+
+ /*
+ * Is the datagram acceptable?
+ *
+ * 1. Length at least the size of an ip header
+ * 2. Version of 4
+ * 3. Checksums correctly. [Speed optimisation for later, skip loopback checksums]
+ * (4. We ought to check for IP multicast addresses and undefined types.. does this matter ?)
+ */
+
+ if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
+ {
+ ip_statistics.IpInHdrErrors++;
+ kfree_skb(skb, FREE_WRITE);
+ return(0);
+ }
+
+ /*
+ * Our transport medium may have padded the buffer out. Now we know it
+ * is IP we can trim to the true length of the frame.
+ */
+
+ skb->len=ntohs(iph->tot_len);
+
+ /*
+ * Next analyse the packet for options. Studies show under one packet in
+ * a thousand have options....
+ */
+
+ if (iph->ihl != 5)
+ { /* Fast path for the typical optionless IP packet. */
+ memset((char *) &opt, 0, sizeof(opt));
+ if (do_options(iph, &opt) != 0)
+ return 0;
+ opts_p = 1;
+ }
+
+ /*
+ * Remember if the frame is fragmented.
+ */
+
+ if(iph->frag_off)
+ {
+ if (iph->frag_off & 0x0020)
+ is_frag|=1;
+ /*
+		 *	Not the first fragment ? (non zero offset)
+ */
+
+ if (ntohs(iph->frag_off) & 0x1fff)
+ is_frag|=2;
+ }
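+
+	/*
+	 *	is_frag bit 0 means more fragments follow this one, bit 1 means
+	 *	this fragment does not start at offset zero. ip_fragment()
+	 *	relies on this encoding when forwarding.
+	 */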
+
+ /*
+ * Do any IP forwarding required. chk_addr() is expensive -- avoid it someday.
+ *
+ * This is inefficient. While finding out if it is for us we could also compute
+ * the routing table entry. This is where the great unified cache theory comes
+ * in as and when someone implements it
+ *
+ * For most hosts over 99% of packets match the first conditional
+ * and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
+ * function entry.
+ */
+
+ if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
+ {
+ /*
+ * Don't forward multicast or broadcast frames.
+ */
+
+ if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
+ {
+ kfree_skb(skb,FREE_WRITE);
+ return 0;
+ }
+
+ /*
+ * The packet is for another target. Forward the frame
+ */
+
+#ifdef CONFIG_IP_FORWARD
+ ip_forward(skb, dev, is_frag);
+#else
+/* printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
+ iph->saddr,iph->daddr);*/
+ ip_statistics.IpInAddrErrors++;
+#endif
+ /*
+ * The forwarder is inefficient and copies the packet. We
+ * free the original now.
+ */
+
+ kfree_skb(skb, FREE_WRITE);
+ return(0);
+ }
+
+ /*
+ * Reassemble IP fragments.
+ */
+
+ if(is_frag)
+ {
+ /* Defragment. Obtain the complete packet if there is one */
+ skb=ip_defrag(iph,skb,dev);
+ if(skb==NULL)
+ return 0;
+ iph=skb->h.iph;
+ }
+
+ /*
+ * Point into the IP datagram, just past the header.
+ */
+
+ skb->ip_hdr = iph;
+ skb->h.raw += iph->ihl*4;
+
+ /*
+ * skb->h.raw now points at the protocol beyond the IP header.
+ */
+
+ hash = iph->protocol & (MAX_INET_PROTOS -1);
+ for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
+ {
+ struct sk_buff *skb2;
+
+ if (ipprot->protocol != iph->protocol)
+ continue;
+ /*
+ * See if we need to make a copy of it. This will
+ * only be set if more than one protocol wants it.
+ * and then not for the last one.
+ *
+ * This is an artifact of poor upper protocol design.
+ * Because the upper protocols damage the actual packet
+ * we must do copying. In actual fact it's even worse
+ * than this as TCP may hold on to the buffer.
+ */
+ if (ipprot->copy)
+ {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if(skb2==NULL)
+ continue;
+ }
+ else
+ {
+ skb2 = skb;
+ }
+ flag = 1;
+
+ /*
+ * Pass on the datagram to each protocol that wants it,
+ * based on the datagram protocol. We should really
+ * check the protocol handler's return values here...
+ */
+ ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
+ (ntohs(iph->tot_len) - (iph->ihl * 4)),
+ iph->saddr, 0, ipprot);
+
+ }
+
+ /*
+ * All protocols checked.
+ * If this packet was a broadcast, we may *not* reply to it, since that
+ * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
+ * ICMP reply messages get queued up for transmission...)
+ */
+
+ if (!flag)
+ {
+ if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, dev);
+ kfree_skb(skb, FREE_WRITE);
+ }
+
+ return(0);
+}
+
+
+/*
+ * Queues a packet to be sent, and starts the transmitter
+ * if necessary. if free = 1 then we free the block after
+ * transmit, otherwise we don't. If free==2 we not only
+ * free the block but also don't assign a new ip seq number.
+ * This routine also needs to put in the total length,
+ * and compute the checksum
+ */
+
+void ip_queue_xmit(struct sock *sk, struct device *dev,
+ struct sk_buff *skb, int free)
+{
+ struct iphdr *iph;
+ unsigned char *ptr;
+
+ /* Sanity check */
+ if (dev == NULL)
+ {
+ printk("IP: ip_queue_xmit dev = NULL\n");
+ return;
+ }
+
+ IS_SKB(skb);
+
+ /*
+ * Do some book-keeping in the packet for later
+ */
+
+
+ skb->dev = dev;
+ skb->when = jiffies;
+
+ /*
+ * Find the IP header and set the length. This is bad
+ * but once we get the skb data handling code in the
+ * hardware will push its header sensibly and we will
+ * set skb->ip_hdr to avoid this mess and the fixed
+ * header length problem
+ */
+
+ ptr = skb->data;
+ ptr += dev->hard_header_len;
+ iph = (struct iphdr *)ptr;
+ skb->ip_hdr = iph;
+ iph->tot_len = ntohs(skb->len-dev->hard_header_len);
+
+ /*
+ * No reassigning numbers to fragments...
+ */
+
+ if(free!=2)
+ iph->id = htons(ip_id_count++);
+ else
+ free=1;
+
+ /* All buffers without an owner socket get freed */
+ if (sk == NULL)
+ free = 1;
+
+ skb->free = free;
+
+ /*
+ * Do we need to fragment. Again this is inefficient.
+ * We need to somehow lock the original buffer and use
+ * bits of it.
+ */
+
+ if(skb->len > dev->mtu + dev->hard_header_len)
+ {
+ ip_fragment(sk,skb,dev,0);
+ IS_SKB(skb);
+ kfree_skb(skb,FREE_WRITE);
+ return;
+ }
+
+ /*
+ * Add an IP checksum
+ */
+
+ ip_send_check(iph);
+
+ /*
+ * Print the frame when debugging
+ */
+
+ /*
+ * More debugging. You cannot queue a packet already on a list
+ * Spot this and moan loudly.
+ */
+ if (skb->next != NULL)
+ {
+ printk("ip_queue_xmit: next != NULL\n");
+ skb_unlink(skb);
+ }
+
+ /*
+ * If a sender wishes the packet to remain unfreed
+ * we add it to his send queue. This arguably belongs
+ * in the TCP level since nobody else uses it. BUT
+ * remember IPng might change all the rules.
+ */
+
+ if (!free)
+ {
+ unsigned long flags;
+ /* The socket now has more outstanding blocks */
+
+ sk->packets_out++;
+
+ /* Protect the list for a moment */
+ save_flags(flags);
+ cli();
+
+ if (skb->link3 != NULL)
+ {
+ printk("ip.c: link3 != NULL\n");
+ skb->link3 = NULL;
+ }
+ if (sk->send_head == NULL)
+ {
+ sk->send_tail = skb;
+ sk->send_head = skb;
+ }
+ else
+ {
+ sk->send_tail->link3 = skb;
+ sk->send_tail = skb;
+ }
+ /* skb->link3 is NULL */
+
+ /* Interrupt restore */
+ restore_flags(flags);
+ /* Set the IP write timeout to the round trip time for the packet.
+ If an acknowledge has not arrived by then we may wish to act */
+ reset_timer(sk, TIME_WRITE, sk->rto);
+ }
+ else
+ /* Remember who owns the buffer */
+ skb->sk = sk;
+
+ /*
+ * If the indicated interface is up and running, send the packet.
+ */
+ ip_statistics.IpOutRequests++;
+
+ if (dev->flags & IFF_UP)
+ {
+ /*
+ * If we have an owner use its priority setting,
+ * otherwise use NORMAL
+ */
+
+ if (sk != NULL)
+ {
+ dev_queue_xmit(skb, dev, sk->priority);
+ }
+ else
+ {
+ dev_queue_xmit(skb, dev, SOPRI_NORMAL);
+ }
+ }
+ else
+ {
+ ip_statistics.IpOutDiscards++;
+ if (free)
+ kfree_skb(skb, FREE_WRITE);
+ }
+}
+
+
+/*
+ * A socket has timed out on its send queue and wants to do a
+ * little retransmitting. Currently this means TCP.
+ */
+
+void ip_do_retransmit(struct sock *sk, int all)
+{
+ struct sk_buff * skb;
+ struct proto *prot;
+ struct device *dev;
+
+ prot = sk->prot;
+ skb = sk->send_head;
+
+ while (skb != NULL)
+ {
+ dev = skb->dev;
+ IS_SKB(skb);
+ skb->when = jiffies;
+
+ /*
+ * In general it's OK just to use the old packet. However we
+ * need to use the current ack and window fields. Urg and
+ * urg_ptr could possibly stand to be updated as well, but we
+ * don't keep the necessary data. That shouldn't be a problem,
+ * if the other end is doing the right thing. Since we're
+ * changing the packet, we have to issue a new IP identifier.
+ */
+
+ /* this check may be unnecessary - retransmit only for TCP */
+ if (sk->protocol == IPPROTO_TCP) {
+ struct tcphdr *th;
+ struct iphdr *iph;
+ int size;
+
+ iph = (struct iphdr *)(skb->data + dev->hard_header_len);
+ th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
+ size = skb->len - (((unsigned char *) th) - skb->data);
+
+ iph->id = htons(ip_id_count++);
+ ip_send_check(iph);
+
+ th->ack_seq = ntohl(sk->acked_seq);
+ th->window = ntohs(tcp_select_window(sk));
+ tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
+ }
+
+ /*
+ * If the interface is (still) up and running, kick it.
+ */
+
+ if (dev->flags & IFF_UP)
+ {
+ /*
+ * If the packet is still being sent by the device/protocol
+ * below then don't retransmit. This is both needed, and good -
+ * especially with connected mode AX.25 where it stops resends
+			 *	of a frame that has not even been sent yet!
+ * We still add up the counts as the round trip time wants
+ * adjusting.
+ */
+ if (sk && !skb_device_locked(skb))
+ {
+ /* Remove it from any existing driver queue first! */
+ skb_unlink(skb);
+ /* Now queue it */
+ ip_statistics.IpOutRequests++;
+ dev_queue_xmit(skb, dev, sk->priority);
+ }
+ }
+
+ /*
+ * Count retransmissions
+ */
+ sk->retransmits++;
+ sk->prot->retransmits ++;
+
+ /*
+ * Only one retransmit requested.
+ */
+ if (!all)
+ break;
+
+ /*
+ * This should cut it off before we send too many packets.
+ */
+ if (sk->retransmits >= sk->cong_window)
+ break;
+ skb = skb->link3;
+ }
+}
+
+/*
+ * This is the normal code called for timeouts. It does the retransmission
+ * and then does backoff. ip_do_retransmit is separated out because
+ * tcp_ack needs to send stuff from the retransmit queue without
+ * initiating a backoff.
+ */
+
+void ip_retransmit(struct sock *sk, int all)
+{
+ ip_do_retransmit(sk, all);
+
+ /*
+ * Increase the timeout each time we retransmit. Note that
+ * we do not increase the rtt estimate. rto is initialized
+ * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
+ * that doubling rto each time is the least we can get away with.
+ * In KA9Q, Karn uses this for the first few times, and then
+ * goes to quadratic. netBSD doubles, but only goes up to *64,
+ * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
+ * defined in the protocol as the maximum possible RTT. I guess
+ * we'll have to use something other than TCP to talk to the
+ * University of Mars.
+ */
+
+ sk->retransmits++;
+ sk->backoff++;
+ sk->rto = min(sk->rto << 1, 120*HZ);
+ reset_timer(sk, TIME_WRITE, sk->rto);
+}
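
To make the arithmetic in the comment above concrete, here is a standalone sketch (not part of the patch) that prints the successive timeouts; HZ = 100 and the 3 second starting rto are assumptions made only for the example.

#include <stdio.h>

#define HZ 100	/* assumption: i386 kernels of this era tick at 100 Hz */

int main(void)
{
	long rto = 3 * HZ;	/* assumed initial retransmission timeout */
	int backoff;

	for (backoff = 1; backoff <= 8; backoff++) {
		rto <<= 1;			/* same doubling as sk->rto above */
		if (rto > 120 * HZ)
			rto = 120 * HZ;		/* clamp at the 120 second ceiling */
		printf("backoff %d: rto = %ld jiffies (%ld s)\n",
		       backoff, rto, rto / HZ);
	}
	return 0;
}
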
+
+/*
+ *	Socket option code for IP. This is the end of the line after any TCP, UDP etc. options on
+ * an IP socket.
+ *
+ * We implement IP_TOS (type of service), IP_TTL (time to live).
+ *
+ *	Next release we will sort out IP_OPTIONS since they are kind of important to some people.
+ */
+
+int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
+{
+ int val,err;
+
+ if (optval == NULL)
+ return(-EINVAL);
+
+ err=verify_area(VERIFY_READ, optval, sizeof(int));
+ if(err)
+ return err;
+
+ val = get_fs_long((unsigned long *)optval);
+
+ if(level!=SOL_IP)
+ return -EOPNOTSUPP;
+
+ switch(optname)
+ {
+ case IP_TOS:
+ if(val<0||val>255)
+ return -EINVAL;
+ sk->ip_tos=val;
+ if(val==IPTOS_LOWDELAY)
+ sk->priority=SOPRI_INTERACTIVE;
+ if(val==IPTOS_THROUGHPUT)
+ sk->priority=SOPRI_BACKGROUND;
+ return 0;
+ case IP_TTL:
+ if(val<1||val>255)
+ return -EINVAL;
+ sk->ip_ttl=val;
+ return 0;
+ /* IP_OPTIONS and friends go here eventually */
+ default:
+ return(-ENOPROTOOPT);
+ }
+}
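
From user space the two options handled above are reached through an ordinary setsockopt() call at level SOL_IP, the same level the code checks for. A minimal sketch, not part of the patch, with error handling trimmed and a UDP socket chosen arbitrarily:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* SOL_IP, IP_TOS, IP_TTL */
#include <netinet/ip.h>		/* IPTOS_LOWDELAY */

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int tos = IPTOS_LOWDELAY;	/* also raises the socket priority above */
	int ttl = 32;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_IP, IP_TOS, &tos, sizeof(tos)) < 0)
		perror("IP_TOS");
	if (setsockopt(fd, SOL_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
		perror("IP_TTL");
	return 0;
}
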
+
+/*
+ * Get the options. Note for future reference. The GET of IP options gets the
+ * _received_ ones. The set sets the _sent_ ones.
+ */
+
+int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
+{
+ int val,err;
+
+ if(level!=SOL_IP)
+ return -EOPNOTSUPP;
+
+ switch(optname)
+ {
+ case IP_TOS:
+ val=sk->ip_tos;
+ break;
+ case IP_TTL:
+ val=sk->ip_ttl;
+ break;
+ default:
+ return(-ENOPROTOOPT);
+ }
+ err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(sizeof(int),(unsigned long *) optlen);
+
+ err=verify_area(VERIFY_WRITE, optval, sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(val,(unsigned long *)optval);
+
+ return(0);
+}
+
+/*
+ * IP protocol layer initialiser
+ */
+
+static struct packet_type ip_packet_type =
+{
+ 0, /* MUTTER ntohs(ETH_P_IP),*/
+ 0, /* copy */
+ ip_rcv,
+ NULL,
+ NULL,
+};
+
+
+/*
+ * IP registers the packet type and then calls the subprotocol initialisers
+ */
+
+void ip_init(void)
+{
+ ip_packet_type.type=htons(ETH_P_IP);
+ dev_add_pack(&ip_packet_type);
+/* ip_raw_init();
+ ip_packet_init();
+ ip_tcp_init();
+ ip_udp_init();*/
+}
diff --git a/net/inet/ip.h b/net/inet/ip.h
new file mode 100644
index 000000000..dd2cbc5ca
--- /dev/null
+++ b/net/inet/ip.h
@@ -0,0 +1,91 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP module.
+ *
+ * Version: @(#)ip.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _IP_H
+#define _IP_H
+
+
+#include <linux/ip.h>
+
+#ifndef _SNMP_H
+#include "snmp.h"
+#endif
+
+#include "sock.h" /* struct sock */
+
+/* IP flags. */
+#define IP_CE 0x8000 /* Flag: "Congestion" */
+#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
+#define IP_MF 0x2000 /* Flag: "More Fragments" */
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+
+#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
+
+
+/* Describe an IP fragment. */
+struct ipfrag {
+ int offset; /* offset of fragment in IP datagram */
+ int end; /* last byte of data in datagram */
+ int len; /* length of this fragment */
+ struct sk_buff *skb; /* complete received fragment */
+ unsigned char *ptr; /* pointer into real fragment data */
+ struct ipfrag *next; /* linked list pointers */
+ struct ipfrag *prev;
+};
+
+/* Describe an entry in the "incomplete datagrams" queue. */
+struct ipq {
+ unsigned char *mac; /* pointer to MAC header */
+ struct iphdr *iph; /* pointer to IP header */
+ int len; /* total length of original datagram */
+ short ihlen; /* length of the IP header */
+ short maclen; /* length of the MAC header */
+ struct timer_list timer; /* when will this queue expire? */
+ struct ipfrag *fragments; /* linked list of received fragments */
+ struct ipq *next; /* linked list pointers */
+ struct ipq *prev;
+ struct device *dev; /* Device - for icmp replies */
+};
+
+
+extern int backoff(int n);
+
+extern void ip_print(const struct iphdr *ip);
+extern int ip_ioctl(struct sock *sk, int cmd,
+ unsigned long arg);
+extern void ip_route_check(unsigned long daddr);
+extern int ip_build_header(struct sk_buff *skb,
+ unsigned long saddr,
+ unsigned long daddr,
+ struct device **dev, int type,
+ struct options *opt, int len,
+ int tos,int ttl);
+extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+extern int ip_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern void ip_queue_xmit(struct sock *sk,
+ struct device *dev, struct sk_buff *skb,
+ int free);
+extern void ip_retransmit(struct sock *sk, int all);
+extern void ip_do_retransmit(struct sock *sk, int all);
+extern int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen);
+extern int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen);
+extern void ip_init(void);
+
+extern struct ip_mib ip_statistics;
+#endif /* _IP_H */
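
ip_compute_csum() declared above (and ip_send_check(), used earlier in ip.c) computes the standard Internet checksum; the kernel routine is optimised, but the underlying algorithm is the usual end-around-carry one's complement sum. A portable illustration only, with an example header made up for the demo:

#include <stdio.h>

/* One's complement sum over big-endian 16 bit words, folded and inverted. */
static unsigned short inet_csum(const unsigned char *buf, int len)
{
	unsigned long sum = 0;

	while (len > 1) {
		sum += (unsigned long)((buf[0] << 8) | buf[1]);
		buf += 2;
		len -= 2;
	}
	if (len == 1)
		sum += (unsigned long)(buf[0] << 8);	/* pad the odd trailing byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries back in */
	return (unsigned short)~sum;
}

int main(void)
{
	/* A made-up 20 byte IP header; the checksum field (bytes 10-11) must
	 * be zero while the sum is taken, which is how ip_send_check() uses it. */
	unsigned char iph[20] = {
		0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7
	};

	printf("header checksum = 0x%04x\n", inet_csum(iph, sizeof(iph)));
	return 0;
}
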
diff --git a/net/inet/ipx.c b/net/inet/ipx.c
new file mode 100644
index 000000000..1d41acc16
--- /dev/null
+++ b/net/inet/ipx.c
@@ -0,0 +1,1360 @@
+/*
+ * Implements an IPX socket layer (badly - but I'm working on it).
+ *
+ * This code is derived from work by
+ * Ross Biro : Writing the original IP stack
+ * Fred Van Kempen : Tidying up the TCP/IP
+ *
+ * Many thanks go to Keith Baker, Institute For Industrial Information
+ * Technology Ltd, Swansea University for allowing me to work on this
+ * in my own time even though it was in some ways related to commercial
+ * work I am currently employed to do there.
+ *
+ * All the material in this file is subject to the Gnu license version 2.
+ * Neither Alan Cox nor the Swansea University Computer Society admit liability
+ * nor provide warranty for any of this software. This material is provided
+ * as is and at no charge.
+ *
+ * Revision 0.21: Uses the new generic socket option code.
+ * Revision 0.22: Gcc clean ups and drop out device registration. Use the
+ * new multi-protocol edition of hard_header
+ * Revision 0.23: IPX /proc by Mark Evans.
+ * Adding a route will overwrite any existing route to the same
+ * network.
+ * Revision 0.24: Supports new /proc with no 4K limit
+ * Revision 0.25: Add ephemeral sockets, passive local network
+ * identification, support for local net 0 and
+ * multiple datalinks <Greg Page>
+ * Revision 0.26: Device drop kills IPX routes via it. (needed for modules)
+ * Revision 0.27: Autobind <Mark Evans>
+ * Revision 0.28: Small fix for multiple local networks <Thomas Winder>
+ * Revision 0.29: Assorted major errors removed <Mark Evans>
+ * Small correction to promisc mode error fix <Alan Cox>
+ * Asynchronous I/O support.
+ *
+ *
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/ipx.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include "sock.h"
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/termios.h> /* For TIOCOUTQ/INQ */
+#include <linux/interrupt.h>
+#include "p8022.h"
+
+#ifdef CONFIG_IPX
+/***********************************************************************************************************************\
+* *
+* Handlers for the socket list. *
+* *
+\***********************************************************************************************************************/
+
+static ipx_socket *volatile ipx_socket_list=NULL;
+
+/*
+ * Note: Sockets may not be removed _during_ an interrupt or inet_bh
+ * handler using this technique. They can be added although we do not
+ * use this facility.
+ */
+
+static void ipx_remove_socket(ipx_socket *sk)
+{
+ ipx_socket *s;
+
+ cli();
+ s=ipx_socket_list;
+ if(s==sk)
+ {
+ ipx_socket_list=s->next;
+ sti();
+ return;
+ }
+ while(s && s->next)
+ {
+ if(s->next==sk)
+ {
+ s->next=sk->next;
+ sti();
+ return;
+ }
+ s=s->next;
+ }
+ sti();
+}
+
+static void ipx_insert_socket(ipx_socket *sk)
+{
+ cli();
+ sk->next=ipx_socket_list;
+ ipx_socket_list=sk;
+ sti();
+}
+
+static ipx_socket *ipx_find_socket(int port)
+{
+ ipx_socket *s;
+ s=ipx_socket_list;
+ while(s)
+ {
+ if(s->ipx_source_addr.sock==port)
+ {
+ return(s);
+ }
+ s=s->next;
+ }
+ return(NULL);
+}
+
+/*
+ * This is only called from user mode. Thus it protects itself against
+ * interrupt users but doesn't worry about being called during work.
+ * Once it is removed from the queue no interrupt or bottom half will
+ * touch it and we are (fairly 8-) ) safe.
+ */
+
+static void ipx_destroy_socket(ipx_socket *sk)
+{
+ struct sk_buff *skb;
+ ipx_remove_socket(sk);
+
+ while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
+ {
+ kfree_skb(skb,FREE_READ);
+ }
+
+ kfree_s(sk,sizeof(*sk));
+}
+
+
+/* Called from proc fs */
+int ipx_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ ipx_socket *s;
+ int len=0;
+ off_t pos=0;
+ off_t begin=0;
+
+	/* Theory: keep printing in the same place until we pass offset */
+
+ len += sprintf (buffer,"Type local_address rem_address tx_queue rx_queue st uid\n");
+ for (s = ipx_socket_list; s != NULL; s = s->next)
+ {
+ len += sprintf (buffer+len,"%02X ", s->ipx_type);
+ len += sprintf (buffer+len,"%08lX:%02X%02X%02X%02X%02X%02X:%04X ", htonl(s->ipx_source_addr.net),
+ s->ipx_source_addr.node[0], s->ipx_source_addr.node[1], s->ipx_source_addr.node[2],
+ s->ipx_source_addr.node[3], s->ipx_source_addr.node[4], s->ipx_source_addr.node[5],
+ htons(s->ipx_source_addr.sock));
+ len += sprintf (buffer+len,"%08lX:%02X%02X%02X%02X%02X%02X:%04X ", htonl(s->ipx_dest_addr.net),
+ s->ipx_dest_addr.node[0], s->ipx_dest_addr.node[1], s->ipx_dest_addr.node[2],
+ s->ipx_dest_addr.node[3], s->ipx_dest_addr.node[4], s->ipx_dest_addr.node[5],
+ htons(s->ipx_dest_addr.sock));
+ len += sprintf (buffer+len,"%08lX:%08lX ", s->wmem_alloc, s->rmem_alloc);
+ len += sprintf (buffer+len,"%02X %d\n", s->state, SOCK_INODE(s->socket)->i_uid);
+
+		/* If we are still dumping unwanted data then discard the record */
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0; /* Keep dumping into the buffer start */
+ begin=pos;
+ }
+ if(pos>offset+length) /* We have dumped enough */
+ break;
+ }
+
+ /* The data in question runs from begin to begin+len */
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Remove unwanted header data from length */
+ if(len>length)
+ len=length; /* Remove unwanted tail data from length */
+
+ return len;
+}
+
+/*******************************************************************************************************************\
+* *
+* Routing tables for the IPX socket layer *
+* *
+\*******************************************************************************************************************/
+
+
+static struct datalink_proto *p8022_datalink = NULL;
+static struct datalink_proto *pEII_datalink = NULL;
+static struct datalink_proto *p8023_datalink = NULL;
+static struct datalink_proto *pSNAP_datalink = NULL;
+
+static ipx_route *ipx_router_list=NULL;
+static ipx_route *ipx_localnet_list=NULL;
+
+static ipx_route *
+ipxrtr_get_local_net(struct device *dev, unsigned short datalink)
+{
+ ipx_route *r;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ r=ipx_localnet_list;
+ while(r!=NULL)
+ {
+ if((r->dev==dev) && (r->dlink_type == datalink))
+ {
+ restore_flags(flags);
+ return r;
+ }
+ r=r->nextlocal;
+ }
+ restore_flags(flags);
+ return NULL;
+}
+
+static ipx_route *
+ipxrtr_get_default_net(void)
+{
+ return ipx_localnet_list;
+}
+
+static ipx_route *ipxrtr_get_dev(long net)
+{
+ ipx_route *r;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ r=ipx_router_list;
+ while(r!=NULL)
+ {
+ if(r->net==net)
+ {
+ restore_flags(flags);
+ return r;
+ }
+ r=r->next;
+ }
+ restore_flags(flags);
+ return NULL;
+}
+
+static void ipxrtr_add_localnet(ipx_route *newnet)
+{
+ ipx_route *r;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+
+ newnet->nextlocal = NULL;
+ if (ipx_localnet_list == NULL) {
+ ipx_localnet_list = newnet;
+ restore_flags(flags);
+ return;
+ }
+
+ r=ipx_localnet_list;
+ while(r->nextlocal!=NULL)
+ r=r->nextlocal;
+
+ r->nextlocal = newnet;
+
+ restore_flags(flags);
+ return;
+}
+
+static int ipxrtr_create(struct ipx_route_def *r)
+{
+ ipx_route *rt=ipxrtr_get_dev(r->ipx_network);
+ struct device *dev;
+ unsigned short dlink_type;
+ struct datalink_proto *datalink = NULL;
+
+ if (r->ipx_flags & IPX_RT_BLUEBOOK) {
+ dlink_type = htons(ETH_P_IPX);
+ datalink = pEII_datalink;
+ } else if (r->ipx_flags & IPX_RT_8022) {
+ dlink_type = htons(ETH_P_802_2);
+ datalink = p8022_datalink;
+ } else if (r->ipx_flags & IPX_RT_SNAP) {
+ dlink_type = htons(ETH_P_SNAP);
+ datalink = pSNAP_datalink;
+ } else {
+ dlink_type = htons(ETH_P_802_3);
+ datalink = p8023_datalink;
+ }
+
+ if (datalink == NULL) {
+ printk("IPX: Unsupported datalink protocol.\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ if(r->ipx_router_network!=0)
+ {
+ /* Adding an indirect route */
+ ipx_route *rt1=ipxrtr_get_dev(r->ipx_router_network);
+ if(rt1==NULL)
+ return -ENETUNREACH;
+ if(rt1->flags&IPX_RT_ROUTED)
+ return -EMULTIHOP;
+ if (rt==NULL)
+ {
+ rt=(ipx_route *)kmalloc(sizeof(ipx_route),GFP_ATOMIC); /* Because we are brave and don't lock the table! */
+ if(rt==NULL)
+ return -EAGAIN;
+ rt->next=ipx_router_list;
+ ipx_router_list=rt;
+ }
+ rt->net=r->ipx_network;
+ rt->router_net=r->ipx_router_network;
+ memcpy(rt->router_node,r->ipx_router_node,sizeof(rt->router_node));
+ rt->flags=IPX_RT_ROUTED;
+ rt->dlink_type = dlink_type;
+ rt->datalink = datalink;
+ rt->dev=rt1->dev;
+ return 0;
+ }
+ /* Add a direct route */
+ dev=dev_get(r->ipx_device);
+ if(dev==NULL)
+ return -ENODEV;
+ /* Check addresses are suitable */
+ if(dev->addr_len>6)
+ return -EINVAL;
+ if(dev->addr_len<2)
+ return -EINVAL;
+ if (ipxrtr_get_local_net(dev, dlink_type) != NULL)
+ return -EEXIST;
+ /* Ok now create */
+ rt=(ipx_route *)kmalloc(sizeof(ipx_route),GFP_ATOMIC); /* Because we are brave and don't lock the table! */
+ if(rt==NULL)
+ return -EAGAIN;
+ rt->next=ipx_router_list;
+ ipx_router_list=rt;
+ rt->router_net=0;
+ memset(rt->router_node,0,sizeof(rt->router_node));
+ rt->dev=dev;
+ rt->net=r->ipx_network;
+ rt->flags=0;
+ rt->dlink_type = dlink_type;
+ rt->datalink = datalink;
+ ipxrtr_add_localnet(rt);
+ return 0;
+}
+
+
+static int ipxrtr_delete_localnet(ipx_route *d)
+{
+ ipx_route **r = &ipx_localnet_list;
+ ipx_route *tmp;
+
+ while ((tmp = *r) != NULL) {
+ if (tmp == d) {
+			*r = tmp->nextlocal;	/* the local net list is chained via nextlocal */
+ return 0;
+ }
+ r = &tmp->nextlocal;
+ }
+ return -ENOENT;
+}
+
+static int ipxrtr_delete(long net)
+{
+ ipx_route **r = &ipx_router_list;
+ ipx_route *tmp;
+
+ while ((tmp = *r) != NULL) {
+ if (tmp->net == net) {
+ *r = tmp->next;
+ if (tmp->router_net == 0) {
+ ipxrtr_delete_localnet(tmp);
+ }
+ kfree_s(tmp, sizeof(ipx_route));
+ return 0;
+ }
+ r = &tmp->next;
+ }
+ return -ENOENT;
+}
+
+void ipxrtr_device_down(struct device *dev)
+{
+ ipx_route **r = &ipx_router_list;
+ ipx_route *tmp;
+
+ while ((tmp = *r) != NULL) {
+ if (tmp->dev == dev) {
+ *r = tmp->next;
+ if(tmp->router_net == 0)
+ ipxrtr_delete_localnet(tmp);
+ kfree_s(tmp, sizeof(ipx_route));
+		}
+		else
+			r = &tmp->next;	/* only advance when tmp was not unlinked and freed */
+ }
+}
+
+static int ipxrtr_ioctl(unsigned int cmd, void *arg)
+{
+ int err;
+ switch(cmd)
+ {
+ case SIOCDELRT:
+ err=verify_area(VERIFY_READ,arg,sizeof(long));
+ if(err)
+ return err;
+ return ipxrtr_delete(get_fs_long(arg));
+ case SIOCADDRT:
+ {
+ struct ipx_route_def f;
+ err=verify_area(VERIFY_READ,arg,sizeof(f));
+ if(err)
+ return err;
+ memcpy_fromfs(&f,arg,sizeof(f));
+ return ipxrtr_create(&f);
+ }
+ default:
+ return -EINVAL;
+ }
+}
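
The SIOCADDRT case above is normally driven from user space through an IPX socket. The sketch below is an illustration only: the device name "eth0" and the net number are invented, and the struct ipx_route_def layout is taken on trust from the <linux/ipx.h> of this era; the field names are the ones ipxrtr_create() reads.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <netinet/in.h>		/* htonl() */
#include <linux/ipx.h>		/* struct ipx_route_def */
#include <linux/sockios.h>	/* SIOCADDRT */

int main(void)
{
	struct ipx_route_def rd;
	int fd = socket(AF_IPX, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&rd, 0, sizeof(rd));
	rd.ipx_network = htonl(0x00001234);	/* example net number */
	rd.ipx_router_network = 0;		/* 0 = direct route, no intermediate router */
	strncpy((char *)rd.ipx_device, "eth0", sizeof(rd.ipx_device));
	rd.ipx_flags = 0;			/* default framing (802.3), per ipxrtr_create() */

	if (ioctl(fd, SIOCADDRT, &rd) < 0)	/* needs superuser, see ipx_ioctl() */
		perror("SIOCADDRT");
	return 0;
}
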
+
+/* Called from proc fs */
+int ipx_rt_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ ipx_route *rt;
+ int len=0;
+ off_t pos=0;
+ off_t begin=0;
+
+ len += sprintf (buffer,"Net Router Flags Dev\n");
+ for (rt = ipx_router_list; rt != NULL; rt = rt->next)
+ {
+ len += sprintf (buffer+len,"%08lX %08lX:%02X%02X%02X%02X%02X%02X %02X %s\n", ntohl(rt->net),
+ ntohl(rt->router_net), rt->router_node[0], rt->router_node[1], rt->router_node[2],
+ rt->router_node[3], rt->router_node[4], rt->router_node[5], rt->flags, rt->dev->name);
+ pos=begin+len;
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+ *start=buffer+(offset-begin);
+ len-=(offset-begin);
+ if(len>length)
+ len=length;
+ return len;
+}
+
+/*******************************************************************************************************************\
+* *
+* Handling for system calls applied via the various interfaces to an IPX socket object *
+* *
+\*******************************************************************************************************************/
+
+static int ipx_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ ipx_socket *sk=(ipx_socket *)sock->data;
+ switch(cmd)
+ {
+ default:
+ return(-EINVAL);
+ }
+}
+
+static int ipx_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen)
+{
+ ipx_socket *sk;
+ int err,opt;
+
+ sk=(ipx_socket *)sock->data;
+
+ if(optval==NULL)
+ return(-EINVAL);
+
+ err=verify_area(VERIFY_READ,optval,sizeof(int));
+ if(err)
+ return err;
+ opt=get_fs_long((unsigned long *)optval);
+
+ switch(level)
+ {
+ case SOL_IPX:
+ switch(optname)
+ {
+ case IPX_TYPE:
+ if(!suser())
+ return(-EPERM);
+ sk->ipx_type=opt;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case SOL_SOCKET:
+ return sock_setsockopt(sk,level,optname,optval,optlen);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ipx_getsockopt(struct socket *sock, int level, int optname,
+ char *optval, int *optlen)
+{
+ ipx_socket *sk;
+ int val=0;
+ int err;
+
+ sk=(ipx_socket *)sock->data;
+
+ switch(level)
+ {
+
+ case SOL_IPX:
+ switch(optname)
+ {
+ case IPX_TYPE:
+ val=sk->ipx_type;
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+ break;
+
+ case SOL_SOCKET:
+ return sock_getsockopt(sk,level,optname,optval,optlen);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ err=verify_area(VERIFY_WRITE,optlen,sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(sizeof(int),(unsigned long *)optlen);
+	err=verify_area(VERIFY_WRITE,optval,sizeof(int));
+	if(err)
+		return err;
+	put_fs_long(val,(unsigned long *)optval);
+ return(0);
+}
+
+static int ipx_listen(struct socket *sock, int backlog)
+{
+ return -EOPNOTSUPP;
+}
+
+static void def_callback1(struct sock *sk)
+{
+ if(!sk->dead)
+ wake_up_interruptible(sk->sleep);
+}
+
+static void def_callback2(struct sock *sk, int len)
+{
+ if(!sk->dead)
+ {
+ wake_up_interruptible(sk->sleep);
+ sock_wake_async(sk->socket);
+ }
+}
+
+static int ipx_create(struct socket *sock, int protocol)
+{
+ ipx_socket *sk;
+ sk=(ipx_socket *)kmalloc(sizeof(*sk),GFP_KERNEL);
+ if(sk==NULL)
+ return(-ENOMEM);
+ switch(sock->type)
+ {
+ case SOCK_DGRAM:
+ break;
+ default:
+ kfree_s((void *)sk,sizeof(*sk));
+ return(-ESOCKTNOSUPPORT);
+ }
+ sk->dead=0;
+ sk->next=NULL;
+ sk->broadcast=0;
+ sk->rcvbuf=SK_RMEM_MAX;
+ sk->sndbuf=SK_WMEM_MAX;
+ sk->wmem_alloc=0;
+ sk->rmem_alloc=0;
+ sk->inuse=0;
+ sk->shutdown=0;
+ sk->prot=NULL; /* So we use default free mechanisms */
+ sk->broadcast=0;
+ sk->err=0;
+ skb_queue_head_init(&sk->receive_queue);
+ skb_queue_head_init(&sk->write_queue);
+ sk->send_head=NULL;
+ skb_queue_head_init(&sk->back_log);
+ sk->state=TCP_CLOSE;
+ sk->socket=sock;
+ sk->type=sock->type;
+ sk->ipx_type=0; /* General user level IPX */
+ sk->debug=0;
+
+ memset(&sk->ipx_dest_addr,'\0',sizeof(sk->ipx_dest_addr));
+ memset(&sk->ipx_source_addr,'\0',sizeof(sk->ipx_source_addr));
+ sk->mtu=IPX_MTU;
+
+ if(sock!=NULL)
+ {
+ sock->data=(void *)sk;
+ sk->sleep=sock->wait;
+ }
+
+ sk->state_change=def_callback1;
+ sk->data_ready=def_callback2;
+ sk->write_space=def_callback1;
+ sk->error_report=def_callback1;
+
+ sk->zapped=1;
+ return(0);
+}
+
+static int ipx_dup(struct socket *newsock,struct socket *oldsock)
+{
+ return(ipx_create(newsock,SOCK_DGRAM));
+}
+
+static int ipx_release(struct socket *sock, struct socket *peer)
+{
+ ipx_socket *sk=(ipx_socket *)sock->data;
+ if(sk==NULL)
+ return(0);
+ if(!sk->dead)
+ sk->state_change(sk);
+ sk->dead=1;
+ sock->data=NULL;
+ ipx_destroy_socket(sk);
+ return(0);
+}
+
+static unsigned short first_free_socketnum(void)
+{
+ static unsigned short socketNum = 0x4000;
+
+ while (ipx_find_socket(ntohs(socketNum)) != NULL)
+ if (socketNum > 0x7ffc)
+ socketNum = 0x4000;
+ else
+ socketNum++;
+
+ return ntohs(socketNum);
+}
+
+static int ipx_bind(struct socket *sock, struct sockaddr *uaddr,int addr_len)
+{
+ ipx_socket *sk;
+ struct ipx_route *rt;
+ unsigned char *nodestart;
+ struct sockaddr_ipx *addr=(struct sockaddr_ipx *)uaddr;
+
+ sk=(ipx_socket *)sock->data;
+
+ if(sk->zapped==0)
+ return(-EIO);
+
+ if(addr_len!=sizeof(struct sockaddr_ipx))
+ return -EINVAL;
+
+ if (addr->sipx_port == 0)
+ {
+ addr->sipx_port = first_free_socketnum();
+ if (addr->sipx_port == 0)
+ return -EINVAL;
+ }
+
+ if(ntohs(addr->sipx_port)<0x4000 && !suser())
+ return(-EPERM); /* protect IPX system stuff like routing/sap */
+
+ /* Source addresses are easy. It must be our network:node pair for
+ an interface routed to IPX with the ipx routing ioctl() */
+
+ if(ipx_find_socket(addr->sipx_port)!=NULL)
+ {
+ if(sk->debug)
+ printk("IPX: bind failed because port %X in use.\n",
+ (int)addr->sipx_port);
+ return -EADDRINUSE;
+ }
+
+ sk->ipx_source_addr.sock=addr->sipx_port;
+
+ if (addr->sipx_network == 0L)
+ {
+ rt = ipxrtr_get_default_net();
+ }
+ else
+ {
+ rt = ipxrtr_get_dev(addr->sipx_network);
+ }
+
+ if(rt == NULL)
+ {
+ if(sk->debug)
+ printk("IPX: bind failed (no device for net %lX)\n",
+ sk->ipx_source_addr.net);
+ return -EADDRNOTAVAIL;
+ }
+
+ sk->ipx_source_addr.net=rt->net;
+
+	/* IPX addresses zero pad physical addresses shorter than 6 bytes */
+ memset(sk->ipx_source_addr.node,'\0',6);
+ nodestart = sk->ipx_source_addr.node + (6 - rt->dev->addr_len);
+ memcpy(nodestart,rt->dev->dev_addr,rt->dev->addr_len);
+
+ ipx_insert_socket(sk);
+ sk->zapped=0;
+ if(sk->debug)
+ printk("IPX: socket is bound.\n");
+ return(0);
+}
+
+static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ ipx_socket *sk=(ipx_socket *)sock->data;
+ struct sockaddr_ipx *addr;
+
+ sk->state = TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+
+ if(addr_len!=sizeof(*addr))
+ return(-EINVAL);
+ addr=(struct sockaddr_ipx *)uaddr;
+
+ if(sk->ipx_source_addr.net==0)
+ /* put the autobinding in */
+ {
+ struct sockaddr_ipx uaddr;
+ int ret;
+
+ uaddr.sipx_port = 0;
+ uaddr.sipx_network = 0L;
+ ret = ipx_bind (sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx));
+ if (ret != 0) return (ret);
+ }
+
+ sk->ipx_dest_addr.net=addr->sipx_network;
+ sk->ipx_dest_addr.sock=addr->sipx_port;
+ memcpy(sk->ipx_dest_addr.node,addr->sipx_node,sizeof(sk->ipx_source_addr.node));
+ if(ipxrtr_get_dev(sk->ipx_dest_addr.net)==NULL)
+ return -ENETUNREACH;
+ sk->ipx_type=addr->sipx_type;
+ sock->state = SS_CONNECTED;
+ sk->state=TCP_ESTABLISHED;
+ return(0);
+}
+
+static int ipx_socketpair(struct socket *sock1, struct socket *sock2)
+{
+ return(-EOPNOTSUPP);
+}
+
+static int ipx_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ if(newsock->data)
+ kfree_s(newsock->data,sizeof(ipx_socket));
+ return -EOPNOTSUPP;
+}
+
+static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ ipx_address *addr;
+ struct sockaddr_ipx sipx;
+ ipx_socket *sk;
+
+ sk=(ipx_socket *)sock->data;
+
+ *uaddr_len = sizeof(struct sockaddr_ipx);
+
+ if(peer)
+ {
+ if(sk->state!=TCP_ESTABLISHED)
+ return -ENOTCONN;
+ addr=&sk->ipx_dest_addr;
+ }
+ else
+ addr=&sk->ipx_source_addr;
+
+ sipx.sipx_family = AF_IPX;
+ sipx.sipx_type = sk->ipx_type;
+ sipx.sipx_port = addr->sock;
+ sipx.sipx_network = addr->net;
+ memcpy(sipx.sipx_node,addr->node,sizeof(sipx.sipx_node));
+ memcpy(uaddr,&sipx,sizeof(sipx));
+ return(0);
+}
+
+int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
+{
+ /* NULL here for pt means the packet was looped back */
+ ipx_socket *sock;
+ ipx_packet *ipx;
+ ipx_route *rt;
+ ipx_route *ln;
+ unsigned char IPXaddr[6];
+
+ ipx=(ipx_packet *)skb->h.raw;
+
+ if(ipx->ipx_checksum!=IPX_NO_CHECKSUM)
+ {
+ /* We don't do checksum options. We can't really. Novell don't seem to have documented them.
+ If you need them try the XNS checksum since IPX is basically XNS in disguise. It might be
+ the same... */
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ /* Too small */
+ if(htons(ipx->ipx_pktsize)<sizeof(ipx_packet))
+ {
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ /* Too many hops */
+ if(ipx->ipx_tctrl>16)
+ {
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ /* Determine what local ipx endpoint this is */
+ ln = ipxrtr_get_local_net(dev, pt->type);
+ if (ln == NULL)
+ {
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ memset(IPXaddr, '\0', 6);
+ memcpy(IPXaddr+(6 - dev->addr_len), dev->dev_addr, dev->addr_len);
+
+ /* Not us/broadcast */
+ if(memcmp(IPXaddr,ipx->ipx_dest.node,6)!=0
+ && memcmp(ipx_broadcast_node,ipx->ipx_dest.node,6)!=0)
+ {
+ /**********************************************************************************************
+
+ IPX router. Roughly as per the Novell spec. This doesn't handle netbios flood fill
+ broadcast frames. See the Novell IPX router specification for more details
+ (for ftp from ftp.novell.com)
+
+ ***********************************************************************************************/
+
+ int incoming_size;
+ int outgoing_size;
+ struct sk_buff *skb2;
+ int free_it=0;
+
+ /* Rule: Don't forward packets that have exceeded the hop limit. This is fixed at 16 in IPX */
+ if((ipx->ipx_tctrl==16) || (skb->pkt_type!=PACKET_HOST))
+ {
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ ipx->ipx_tctrl++;
+ /* Don't forward if we don't have a route. We ought to go off and start hunting out routes but
+ if someone needs this _THEY_ can add it */
+ rt=ipxrtr_get_dev(ipx->ipx_dest.net);
+		if(rt==NULL)	/* Unlike IP we can send back out of the interface the frame arrived on, e.g. when doing DIX/802.3 conversion */
+ {
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ /* Check for differences in outgoing and incoming packet size */
+ incoming_size = skb->len - ntohs(ipx->ipx_pktsize);
+ outgoing_size = rt->datalink->header_length + rt->dev->hard_header_len;
+ if(incoming_size != outgoing_size)
+ {
+ /* A different header length causes a copy. Awkward to avoid with the current
+ sk_buff stuff. */
+ skb2=alloc_skb(ntohs(ipx->ipx_pktsize) + outgoing_size,
+ GFP_ATOMIC);
+ if(skb2==NULL)
+ {
+ kfree_skb(skb,FREE_READ);
+ return 0;
+ }
+ free_it=1;
+ skb2->free=1;
+ skb2->len=ntohs(ipx->ipx_pktsize) + outgoing_size;
+ skb2->mem_addr = skb2;
+ skb2->arp = 1;
+ skb2->sk = NULL;
+
+ /* Need to copy with appropriate offsets */
+ memcpy((char *)(skb2+1)+outgoing_size,
+ (char *)(skb+1)+incoming_size,
+ ntohs(ipx->ipx_pktsize));
+ }
+ else
+ {
+ skb2=skb;
+ }
+
+		/* Now operate on the buffer (the hop count was already incremented above) */
+
+ skb2->dev = rt->dev;
+ rt->datalink->datalink_header(rt->datalink, skb2,
+ (rt->flags&IPX_RT_ROUTED)?rt->router_node
+ :ipx->ipx_dest.node);
+
+ dev_queue_xmit(skb2,rt->dev,SOPRI_NORMAL);
+
+ if(free_it)
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+ /************ End of router: Now sanity check stuff for us ***************/
+
+	/* Ok, it's for us! */
+ if (ln->net == 0L) {
+/* printk("IPX: Registering local net %lx\n", ipx->ipx_dest.net);*/
+ ln->net = ipx->ipx_dest.net;
+ }
+
+ sock=ipx_find_socket(ipx->ipx_dest.sock);
+ if(sock==NULL) /* But not one of our sockets */
+ {
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ /* Check to see if this socket needs its network number */
+ ln = ipxrtr_get_default_net();
+ if (sock->ipx_source_addr.net == 0L)
+ sock->ipx_source_addr.net = ln->net;
+
+ if(sock_queue_rcv_skb(sock, skb)<0)
+ {
+ kfree_skb(skb,FREE_READ); /* Socket is full */
+ return(0);
+ }
+
+ return(0);
+}
+
+static int ipx_sendto(struct socket *sock, void *ubuf, int len, int noblock,
+ unsigned flags, struct sockaddr *usip, int addr_len)
+{
+ ipx_socket *sk=(ipx_socket *)sock->data;
+ struct sockaddr_ipx *usipx=(struct sockaddr_ipx *)usip;
+ struct sockaddr_ipx local_sipx;
+ struct sk_buff *skb;
+ struct device *dev;
+ struct ipx_packet *ipx;
+ int size;
+ ipx_route *rt;
+ struct datalink_proto *dl = NULL;
+ unsigned char IPXaddr[6];
+ int self_addressing = 0;
+ int broadcast = 0;
+
+ if(flags)
+ return -EINVAL;
+
+ if(usipx)
+ {
+ if(sk->ipx_source_addr.net==0)
+ /* put the autobinding in */
+ {
+ struct sockaddr_ipx uaddr;
+ int ret;
+
+ uaddr.sipx_port = 0;
+ uaddr.sipx_network = 0L;
+ ret = ipx_bind (sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx));
+ if (ret != 0) return (ret);
+ }
+
+ if(addr_len <sizeof(*usipx))
+ return(-EINVAL);
+ if(usipx->sipx_family != AF_IPX)
+ return -EINVAL;
+ if(htons(usipx->sipx_port)<0x4000 && !suser())
+ return -EPERM;
+ }
+ else
+ {
+ if(sk->state!=TCP_ESTABLISHED)
+ return -ENOTCONN;
+ usipx=&local_sipx;
+ usipx->sipx_family=AF_IPX;
+ usipx->sipx_type=sk->ipx_type;
+ usipx->sipx_port=sk->ipx_dest_addr.sock;
+ usipx->sipx_network=sk->ipx_dest_addr.net;
+ memcpy(usipx->sipx_node,sk->ipx_dest_addr.node,sizeof(usipx->sipx_node));
+ }
+
+ if(sk->debug)
+ printk("IPX: sendto: Addresses built.\n");
+
+ if(memcmp(&usipx->sipx_node,&ipx_broadcast_node,6)==0)
+ {
+ if (!sk->broadcast)
+ return -ENETUNREACH;
+ broadcast = 1;
+ }
+
+ /* Build a packet */
+
+ if(sk->debug)
+ printk("IPX: sendto: building packet.\n");
+
+ size=sizeof(ipx_packet)+len; /* For mac headers */
+
+ /* Find out where this has to go */
+ if (usipx->sipx_network == 0L) {
+ rt = ipxrtr_get_default_net();
+ if (rt != NULL)
+ usipx->sipx_network = rt->net;
+ } else
+ rt=ipxrtr_get_dev(usipx->sipx_network);
+
+ if(rt==NULL)
+ {
+ return -ENETUNREACH;
+ }
+
+ dev=rt->dev;
+ dl = rt->datalink;
+
+ size += dev->hard_header_len;
+ size += dl->header_length;
+
+ if(sk->debug)
+ printk("IPX: sendto: allocating buffer (%d)\n",size);
+
+ if(size+sk->wmem_alloc>sk->sndbuf) {
+ return -EAGAIN;
+ }
+
+ skb=alloc_skb(size,GFP_KERNEL);
+ if(skb==NULL)
+ return -ENOMEM;
+
+ skb->mem_addr=skb;
+ skb->sk=sk;
+ skb->free=1;
+ skb->arp=1;
+ skb->len=size;
+
+ sk->wmem_alloc+=skb->mem_len;
+
+ if(sk->debug)
+ printk("Building MAC header.\n");
+ skb->dev=rt->dev;
+
+ /* Build Data Link header */
+ dl->datalink_header(dl, skb,
+ (rt->flags&IPX_RT_ROUTED)?rt->router_node:usipx->sipx_node);
+
+ /* See if we are sending to ourself */
+ memset(IPXaddr, '\0', 6);
+ memcpy(IPXaddr+(6 - skb->dev->addr_len), skb->dev->dev_addr,
+ skb->dev->addr_len);
+
+ self_addressing = !memcmp(IPXaddr,
+ (rt->flags&IPX_RT_ROUTED)?rt->router_node
+ :usipx->sipx_node,
+ 6);
+
+ /* Now the IPX */
+ if(sk->debug)
+ printk("Building IPX Header.\n");
+ ipx=(ipx_packet *)skb->h.raw;
+ ipx->ipx_checksum=0xFFFF;
+ ipx->ipx_pktsize=htons(len+sizeof(ipx_packet));
+ ipx->ipx_tctrl=0;
+ ipx->ipx_type=usipx->sipx_type;
+
+ memcpy(&ipx->ipx_source,&sk->ipx_source_addr,sizeof(ipx->ipx_source));
+ ipx->ipx_dest.net=usipx->sipx_network;
+ memcpy(ipx->ipx_dest.node,usipx->sipx_node,sizeof(ipx->ipx_dest.node));
+ ipx->ipx_dest.sock=usipx->sipx_port;
+ if(sk->debug)
+ printk("IPX: Appending user data.\n");
+ /* User data follows immediately after the IPX data */
+ memcpy_fromfs((char *)(ipx+1),ubuf,len);
+ if(sk->debug)
+ printk("IPX: Transmitting buffer\n");
+ if((dev->flags&IFF_LOOPBACK) || self_addressing) {
+ struct packet_type pt;
+
+ /* loop back */
+ pt.type = rt->dlink_type;
+ sk->wmem_alloc-=skb->mem_len;
+ skb->sk = NULL;
+ ipx_rcv(skb,dev,&pt);
+ } else {
+ if (broadcast) {
+ struct packet_type pt;
+ struct sk_buff *skb2;
+
+ /* loop back */
+ pt.type = rt->dlink_type;
+
+			skb2=alloc_skb(skb->len, GFP_ATOMIC);
+			if(skb2!=NULL)
+			{
+				skb2->mem_addr=skb2;
+				skb2->free=1;
+				skb2->arp=1;
+				skb2->len=skb->len;
+				skb2->sk = NULL;
+				skb2->h.raw = skb2->data + rt->datalink->header_length
+					+ dev->hard_header_len;
+				memcpy(skb2->data, skb->data, skb->len);
+				ipx_rcv(skb2,dev,&pt);
+			}
+ }
+ dev_queue_xmit(skb,dev,SOPRI_NORMAL);
+ }
+ return len;
+}
+
+static int ipx_send(struct socket *sock, void *ubuf, int size, int noblock, unsigned flags)
+{
+ return ipx_sendto(sock,ubuf,size,noblock,flags,NULL,0);
+}
+
+static int ipx_recvfrom(struct socket *sock, void *ubuf, int size, int noblock,
+ unsigned flags, struct sockaddr *sip, int *addr_len)
+{
+ ipx_socket *sk=(ipx_socket *)sock->data;
+ struct sockaddr_ipx *sipx=(struct sockaddr_ipx *)sip;
+ struct ipx_packet *ipx = NULL;
+ /* FILL ME IN */
+ int copied = 0;
+ struct sk_buff *skb;
+ int er;
+
+ if(sk->err)
+ {
+ er= -sk->err;
+ sk->err=0;
+ return er;
+ }
+
+ if(addr_len)
+ *addr_len=sizeof(*sipx);
+
+ skb=skb_recv_datagram(sk,flags,noblock,&er);
+ if(skb==NULL)
+ return er;
+
+ ipx = (ipx_packet *)(skb->h.raw);
+ copied=ntohs(ipx->ipx_pktsize) - sizeof(ipx_packet);
+ skb_copy_datagram(skb,sizeof(struct ipx_packet),ubuf,copied);
+
+ if(sipx)
+ {
+ sipx->sipx_family=AF_IPX;
+ sipx->sipx_port=ipx->ipx_source.sock;
+ memcpy(sipx->sipx_node,ipx->ipx_source.node,sizeof(sipx->sipx_node));
+ sipx->sipx_network=ipx->ipx_source.net;
+ sipx->sipx_type = ipx->ipx_type;
+ }
+ skb_free_datagram(skb);
+ return(copied);
+}
+
+
+static int ipx_write(struct socket *sock, char *ubuf, int size, int noblock)
+{
+ return ipx_send(sock,ubuf,size,noblock,0);
+}
+
+
+static int ipx_recv(struct socket *sock, void *ubuf, int size , int noblock,
+ unsigned flags)
+{
+ ipx_socket *sk=(ipx_socket *)sock->data;
+ if(sk->zapped)
+ return -ENOTCONN;
+ return ipx_recvfrom(sock,ubuf,size,noblock,flags,NULL, NULL);
+}
+
+static int ipx_read(struct socket *sock, char *ubuf, int size, int noblock)
+{
+ return ipx_recv(sock,ubuf,size,noblock,0);
+}
+
+
+static int ipx_shutdown(struct socket *sk,int how)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ipx_select(struct socket *sock , int sel_type, select_table *wait)
+{
+ ipx_socket *sk=(ipx_socket *)sock->data;
+
+ return datagram_select(sk,sel_type,wait);
+}
+
+static int ipx_ioctl(struct socket *sock,unsigned int cmd, unsigned long arg)
+{
+ int err;
+ long amount=0;
+ ipx_socket *sk=(ipx_socket *)sock->data;
+
+ switch(cmd)
+ {
+ case TIOCOUTQ:
+ err=verify_area(VERIFY_WRITE,(void *)arg,sizeof(unsigned long));
+ if(err)
+ return err;
+ amount=sk->sndbuf-sk->wmem_alloc;
+ if(amount<0)
+ amount=0;
+ put_fs_long(amount,(unsigned long *)arg);
+ return 0;
+ case TIOCINQ:
+ {
+ struct sk_buff *skb;
+ /* These two are safe on a single CPU system as only user tasks fiddle here */
+ if((skb=skb_peek(&sk->receive_queue))!=NULL)
+ amount=skb->len;
+			err=verify_area(VERIFY_WRITE,(void *)arg,sizeof(unsigned long));
+			if(err)
+				return err;
+			put_fs_long(amount,(unsigned long *)arg);
+ return 0;
+ }
+ case SIOCADDRT:
+ case SIOCDELRT:
+ if(!suser())
+ return -EPERM;
+ return(ipxrtr_ioctl(cmd,(void *)arg));
+ case SIOCGSTAMP:
+ if (sk)
+ {
+ if(sk->stamp.tv_sec==0)
+ return -ENOENT;
+ err=verify_area(VERIFY_WRITE,(void *)arg,sizeof(struct timeval));
+ if(err)
+ return err;
+ memcpy_tofs((void *)arg,&sk->stamp,sizeof(struct timeval));
+ return 0;
+ }
+ return -EINVAL;
+ case SIOCGIFCONF:
+ case SIOCGIFFLAGS:
+ case SIOCSIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ case SIOCGIFMEM:
+ case SIOCSIFMEM:
+ case SIOCGIFMTU:
+ case SIOCSIFMTU:
+ case SIOCSIFLINK:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case OLD_SIOCGIFHWADDR:
+ return(dev_ioctl(cmd,(void *) arg));
+
+
+ default:
+ return -EINVAL;
+ }
+ /*NOTREACHED*/
+ return(0);
+}
+
+static struct proto_ops ipx_proto_ops = {
+ AF_IPX,
+
+ ipx_create,
+ ipx_dup,
+ ipx_release,
+ ipx_bind,
+ ipx_connect,
+ ipx_socketpair,
+ ipx_accept,
+ ipx_getname,
+ ipx_read,
+ ipx_write,
+ ipx_select,
+ ipx_ioctl,
+ ipx_listen,
+ ipx_send,
+ ipx_recv,
+ ipx_sendto,
+ ipx_recvfrom,
+ ipx_shutdown,
+ ipx_setsockopt,
+ ipx_getsockopt,
+ ipx_fcntl,
+};
+
+/* Called by ddi.c on kernel start up */
+
+static struct packet_type ipx_8023_packet_type =
+{
+ 0, /* MUTTER ntohs(ETH_P_8023),*/
+ 0, /* copy */
+ ipx_rcv,
+ NULL,
+ NULL,
+};
+
+static struct packet_type ipx_dix_packet_type =
+{
+ 0, /* MUTTER ntohs(ETH_P_IPX),*/
+ 0, /* copy */
+ ipx_rcv,
+ NULL,
+ NULL,
+};
+
+
+extern struct datalink_proto *make_EII_client(void);
+extern struct datalink_proto *make_8023_client(void);
+
+void ipx_proto_init(struct net_proto *pro)
+{
+ unsigned char val = 0xE0;
+ (void) sock_register(ipx_proto_ops.family, &ipx_proto_ops);
+
+ pEII_datalink = make_EII_client();
+ ipx_dix_packet_type.type=htons(ETH_P_IPX);
+ dev_add_pack(&ipx_dix_packet_type);
+
+ p8023_datalink = make_8023_client();
+ ipx_8023_packet_type.type=htons(ETH_P_802_3);
+ dev_add_pack(&ipx_8023_packet_type);
+
+ if ((p8022_datalink = register_8022_client(val, ipx_rcv)) == NULL)
+ printk("IPX: Unable to register with 802.2\n");
+
+ printk("Swansea University Computer Society IPX 0.29 BETA for NET3.017\n");
+
+}
+#endif
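
For reference, the user level view of the bind and sendto paths implemented above. This is a sketch, not part of the patch: the destination network, node and socket numbers are invented, and the sockaddr_ipx fields used are exactly those read by ipx_bind() and ipx_sendto().

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* htons(), htonl() */
#include <linux/ipx.h>		/* struct sockaddr_ipx */

int main(void)
{
	struct sockaddr_ipx local, peer;
	char msg[] = "hello";
	int fd = socket(AF_IPX, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&local, 0, sizeof(local));
	local.sipx_family = AF_IPX;
	local.sipx_network = 0;		/* 0 = use the primary (default) local net */
	local.sipx_port = 0;		/* 0 = let first_free_socketnum() pick one */
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
		perror("bind");

	memset(&peer, 0, sizeof(peer));
	peer.sipx_family = AF_IPX;
	peer.sipx_network = htonl(0x00001234);			/* invented net */
	memcpy(peer.sipx_node, "\x00\x00\x1b\x02\x03\x04", 6);	/* invented node */
	peer.sipx_port = htons(0x4001);				/* invented socket */
	peer.sipx_type = 4;					/* e.g. a SAP-style type */

	if (sendto(fd, msg, sizeof(msg), 0,
		   (struct sockaddr *)&peer, sizeof(peer)) < 0)
		perror("sendto");
	return 0;
}
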
diff --git a/net/inet/ipx.h b/net/inet/ipx.h
new file mode 100644
index 000000000..7a4cf6a0e
--- /dev/null
+++ b/net/inet/ipx.h
@@ -0,0 +1,71 @@
+
+/*
+ * The following information is in its entirety obtained from:
+ *
+ * Novell 'IPX Router Specification' Version 1.10
+ * Part No. 107-000029-001
+ *
+ * Which is available from ftp.novell.com
+ */
+
+#ifndef _NET_INET_IPX_H_
+#define _NET_INET_IPX_H_
+
+#include <linux/ipx.h>
+#include "datalink.h"
+
+typedef struct
+{
+ unsigned long net;
+ unsigned char node[6];
+ unsigned short sock;
+} ipx_address;
+
+#define ipx_broadcast_node "\377\377\377\377\377\377"
+
+typedef struct ipx_packet
+{
+ unsigned short ipx_checksum;
+#define IPX_NO_CHECKSUM 0xFFFF
+ unsigned short ipx_pktsize;
+ unsigned char ipx_tctrl;
+ unsigned char ipx_type;
+#define IPX_TYPE_UNKNOWN 0x00
+#define IPX_TYPE_RIP 0x01 /* may also be 0 */
+#define IPX_TYPE_SAP 0x04 /* may also be 0 */
+#define IPX_TYPE_SPX 0x05 /* Not yet implemented */
+#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
+#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast [Not supported] */
+ ipx_address ipx_dest __attribute__ ((packed));
+ ipx_address ipx_source __attribute__ ((packed));
+} ipx_packet;
+
+
+typedef struct ipx_route
+{
+ unsigned long net;
+ unsigned char router_node[6];
+ unsigned long router_net;
+ unsigned short flags;
+#define IPX_RT_ROUTED 1 /* This isn't a direct route. Send via this if to node router_node */
+#define IPX_RT_BLUEBOOK 2
+#define IPX_RT_8022 4
+#define IPX_RT_SNAP 8
+ unsigned short dlink_type;
+ struct device *dev;
+ struct datalink_proto *datalink;
+ struct ipx_route *next;
+ struct ipx_route *nextlocal;
+} ipx_route;
+
+
+typedef struct sock ipx_socket;
+
+
+#include "ipxcall.h"
+extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt);
+extern void ipxrtr_device_down(struct device *dev);
+
+
+
+#endif
diff --git a/net/inet/ipxcall.h b/net/inet/ipxcall.h
new file mode 100644
index 000000000..eb5bd2bd2
--- /dev/null
+++ b/net/inet/ipxcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void ipx_proto_init(struct net_proto *pro);
diff --git a/net/inet/p8022.c b/net/inet/p8022.c
new file mode 100644
index 000000000..f145a836e
--- /dev/null
+++ b/net/inet/p8022.c
@@ -0,0 +1,97 @@
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include "datalink.h"
+#include <linux/mm.h>
+#include <linux/in.h>
+
+static struct datalink_proto *p8022_list = NULL;
+
+static struct datalink_proto *
+find_8022_client(unsigned char type)
+{
+ struct datalink_proto *proto;
+
+ for (proto = p8022_list;
+ ((proto != NULL) && (*(proto->type) != type));
+ proto = proto->next)
+ ;
+
+ return proto;
+}
+
+int
+p8022_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
+{
+ struct datalink_proto *proto;
+
+ proto = find_8022_client(*(skb->h.raw));
+ if (proto != NULL) {
+ skb->h.raw += 3;
+ return proto->rcvfunc(skb, dev, pt);
+ }
+
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ return 0;
+}
+
+static void
+p8022_datalink_header(struct datalink_proto *dl,
+ struct sk_buff *skb, unsigned char *dest_node)
+{
+ struct device *dev = skb->dev;
+ unsigned long len = skb->len;
+ unsigned long hard_len = dev->hard_header_len;
+ unsigned char *rawp;
+
+ dev->hard_header(skb->data, dev, len - hard_len,
+ dest_node, NULL, len - hard_len, skb);
+ rawp = skb->data + hard_len;
+ *rawp = dl->type[0];
+ rawp++;
+ *rawp = dl->type[0];
+ rawp++;
+ *rawp = 0x03; /* UI */
+ rawp++;
+ skb->h.raw = rawp;
+}
+
+static struct packet_type p8022_packet_type =
+{
+ 0, /* MUTTER ntohs(ETH_P_IPX),*/
+ 0, /* copy */
+ p8022_rcv,
+ NULL,
+ NULL,
+};
+
+
+void p8022_proto_init(struct net_proto *pro)
+{
+ p8022_packet_type.type=htons(ETH_P_802_2);
+ dev_add_pack(&p8022_packet_type);
+}
+
+struct datalink_proto *
+register_8022_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *))
+{
+ struct datalink_proto *proto;
+
+ if (find_8022_client(type) != NULL)
+ return NULL;
+
+ proto = (struct datalink_proto *) kmalloc(sizeof(*proto), GFP_ATOMIC);
+ if (proto != NULL) {
+ proto->type[0] = type;
+ proto->type_len = 1;
+ proto->rcvfunc = rcvfunc;
+ proto->header_length = 3;
+ proto->datalink_header = p8022_datalink_header;
+
+ proto->next = p8022_list;
+ p8022_list = proto;
+ }
+
+ return proto;
+}
+
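A second client of register_8022_client() would look much like the IPX registration in ipx.c. The fragment below is a hypothetical sketch only: myproto_rcv and SAP 0x98 are invented, and the receive function merely discards the frame.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "datalink.h"
#include "p8022.h"

static struct datalink_proto *my_dl;

/* Called with skb->h.raw already advanced past the 3 byte 802.2 header. */
static int myproto_rcv(struct sk_buff *skb, struct device *dev,
		       struct packet_type *pt)
{
	kfree_skb(skb, FREE_READ);	/* just drop it in this sketch */
	return 0;
}

void myproto_init(void)
{
	my_dl = register_8022_client(0x98, myproto_rcv);
	if (my_dl == NULL)
		printk("myproto: SAP 0x98 already in use\n");
}
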
diff --git a/net/inet/p8022.h b/net/inet/p8022.h
new file mode 100644
index 000000000..52c676be2
--- /dev/null
+++ b/net/inet/p8022.h
@@ -0,0 +1,2 @@
+struct datalink_proto *register_8022_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+
diff --git a/net/inet/p8022call.h b/net/inet/p8022call.h
new file mode 100644
index 000000000..14f0c2cee
--- /dev/null
+++ b/net/inet/p8022call.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void p8022_proto_init(struct net_proto *);
diff --git a/net/inet/p8023.c b/net/inet/p8023.c
new file mode 100644
index 000000000..b5196e409
--- /dev/null
+++ b/net/inet/p8023.c
@@ -0,0 +1,34 @@
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include "datalink.h"
+#include <linux/mm.h>
+#include <linux/in.h>
+
+static void
+p8023_datalink_header(struct datalink_proto *dl,
+ struct sk_buff *skb, unsigned char *dest_node)
+{
+ struct device *dev = skb->dev;
+ unsigned long len = skb->len;
+ unsigned long hard_len = dev->hard_header_len;
+
+ dev->hard_header(skb->data, dev, len - hard_len,
+ dest_node, NULL, len - hard_len, skb);
+ skb->h.raw = skb->data + hard_len;
+}
+
+struct datalink_proto *
+make_8023_client(void)
+{
+ struct datalink_proto *proto;
+
+ proto = (struct datalink_proto *) kmalloc(sizeof(*proto), GFP_ATOMIC);
+ if (proto != NULL) {
+ proto->type_len = 0;
+ proto->header_length = 0;
+ proto->datalink_header = p8023_datalink_header;
+ }
+
+ return proto;
+}
+
diff --git a/net/inet/packet.c b/net/inet/packet.c
new file mode 100644
index 000000000..0f6c3698d
--- /dev/null
+++ b/net/inet/packet.c
@@ -0,0 +1,391 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * PACKET - implements raw packet sockets.
+ *
+ * Version: @(#)packet.c 1.0.6 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Alan Cox : verify_area() now used correctly
+ * Alan Cox : new skbuff lists, look ma no backlogs!
+ * Alan Cox : tidied skbuff lists.
+ * Alan Cox : Now uses generic datagram routines I
+ * added. Also fixed the peek/read crash
+ * from all old Linux datagram code.
+ * Alan Cox : Uses the improved datagram code.
+ * Alan Cox : Added NULL's for socket options.
+ * Alan Cox : Re-commented the code.
+ * Alan Cox : Use new kernel side addressing
+ * Rob Janssen : Correct MTU usage.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fcntl.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+
+/*
+ * We really ought to have a single public _inline_ min function!
+ */
+
+static unsigned long min(unsigned long a, unsigned long b)
+{
+ if (a < b)
+ return(a);
+ return(b);
+}
+
+
+/*
+ * This should be the easiest of all, all we do is copy it into a buffer.
+ */
+
+int packet_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
+{
+ struct sock *sk;
+
+ /*
+ * When we registered the protocol we saved the socket in the data
+ * field for just this event.
+ */
+
+ sk = (struct sock *) pt->data;
+
+ /*
+	 *	The SOCK_PACKET socket receives _all_ frames, and therefore
+	 *	needs to put the header back onto the buffer
+ * (it was removed by inet_bh()).
+ */
+
+ skb->dev = dev;
+ skb->len += dev->hard_header_len;
+
+ skb->sk = sk;
+
+ /*
+ * Charge the memory to the socket. This is done specifically
+ * to prevent sockets using all the memory up.
+ */
+
+ if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf)
+ {
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ return(0);
+ }
+ sk->rmem_alloc += skb->mem_len;
+
+ /*
+ * Queue the packet up, and wake anyone waiting for it.
+ */
+
+ skb_queue_tail(&sk->receive_queue,skb);
+ wake_up_interruptible(sk->sleep);
+
+ /*
+ * Processing complete.
+ */
+
+ release_sock(sk); /* This is now effectively surplus in this layer */
+ return(0);
+}
+
+
+/*
+ * Output a raw packet to a device layer. This bypasses all the other
+ * protocol layers and you must therefore supply it with a complete frame
+ */
+
+static int packet_sendto(struct sock *sk, unsigned char *from, int len,
+ int noblock, unsigned flags, struct sockaddr_in *usin,
+ int addr_len)
+{
+ struct sk_buff *skb;
+ struct device *dev;
+ struct sockaddr *saddr=(struct sockaddr *)usin;
+
+ /*
+ * Check the flags.
+ */
+
+ if (flags)
+ return(-EINVAL);
+
+ /*
+ * Get and verify the address.
+ */
+
+ if (usin)
+ {
+ if (addr_len < sizeof(*saddr))
+ return(-EINVAL);
+ }
+ else
+ return(-EINVAL); /* SOCK_PACKET must be sent giving an address */
+
+ /*
+ * Find the device first to size check it
+ */
+
+ saddr->sa_data[13] = 0;
+ dev = dev_get(saddr->sa_data);
+ if (dev == NULL)
+ {
+ return(-ENXIO);
+ }
+
+ /*
+ * You may not queue a frame bigger than the mtu. This is the lowest level
+ * raw protocol and you must do your own fragmentation at this level.
+ */
+
+ if(len>dev->mtu+dev->hard_header_len)
+ return -EMSGSIZE;
+
+ skb = sk->prot->wmalloc(sk, len, 0, GFP_KERNEL);
+
+ /*
+ * If the write buffer is full, then tough. At this level the user gets to
+ * deal with the problem - do your own algorithmic backoffs.
+ */
+
+ if (skb == NULL)
+ {
+ return(-ENOBUFS);
+ }
+
+ /*
+ * Fill it in
+ */
+
+ skb->sk = sk;
+ skb->free = 1;
+ memcpy_fromfs(skb->data, from, len);
+ skb->len = len;
+ skb->arp = 1; /* No ARP needs doing on this (complete) frame */
+
+ /*
+ * Now send it
+ */
+
+ if (dev->flags & IFF_UP)
+ dev_queue_xmit(skb, dev, sk->priority);
+ else
+ kfree_skb(skb, FREE_WRITE);
+ return(len);
+}
+
+/*
+ * A write to a SOCK_PACKET can't actually do anything useful and will
+ * always fail but we include it for completeness and future expansion.
+ */
+
+static int packet_write(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags)
+{
+ return(packet_sendto(sk, buff, len, noblock, flags, NULL, 0));
+}
+
+/*
+ * Close a SOCK_PACKET socket. This is fairly simple. We immediately go
+ * to 'closed' state and remove our protocol entry in the device list.
+ * The release_sock() will destroy the socket if a user has closed the
+ * file side of the object.
+ */
+
+static void packet_close(struct sock *sk, int timeout)
+{
+ sk->inuse = 1;
+ sk->state = TCP_CLOSE;
+ dev_remove_pack((struct packet_type *)sk->pair);
+ kfree_s((void *)sk->pair, sizeof(struct packet_type));
+ sk->pair = NULL;
+ release_sock(sk);
+}
+
+/*
+ * Create a packet of type SOCK_PACKET. We do one slightly irregular
+ * thing here that wants tidying up. We borrow the 'pair' pointer in
+ * the socket object so we can find the packet_type entry in the
+ * device list. The reverse is easy as we use the data field of the
+ * packet type to point to our socket.
+ */
+
+static int packet_init(struct sock *sk)
+{
+ struct packet_type *p;
+
+ p = (struct packet_type *) kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL)
+ return(-ENOMEM);
+
+ p->func = packet_rcv;
+ p->type = sk->num;
+ p->data = (void *)sk;
+ dev_add_pack(p);
+
+ /*
+ * We need to remember this somewhere.
+ */
+
+ sk->pair = (struct sock *)p;
+
+ return(0);
+}
+
+
+/*
+ * Pull a packet from our receive queue and hand it to the user.
+ * If necessary we block.
+ */
+
+int packet_recvfrom(struct sock *sk, unsigned char *to, int len,
+ int noblock, unsigned flags, struct sockaddr_in *sin,
+ int *addr_len)
+{
+ int copied=0;
+ struct sk_buff *skb;
+ struct sockaddr *saddr;
+ int err;
+ int truesize;
+
+ saddr = (struct sockaddr *)sin;
+
+ if (sk->shutdown & RCV_SHUTDOWN)
+ return(0);
+
+ /*
+ * If the address length field is there to be filled in, we fill
+ * it in now.
+ */
+
+ if (addr_len)
+ *addr_len=sizeof(*saddr);
+
+ /*
+ * Call the generic datagram receiver. This handles all sorts
+ * of horrible races and re-entrancy so we can forget about it
+ * in the protocol layers.
+ */
+
+ skb=skb_recv_datagram(sk,flags,noblock,&err);
+
+ /*
+ * An error occurred so return it. Because skb_recv_datagram()
+	 *	handles the blocking we don't need to see or worry about blocking
+	 *	retries here.
+ */
+
+ if(skb==NULL)
+ return err;
+
+ /*
+ * You lose any data beyond the buffer you gave. If it worries a
+ * user program they can ask the device for its MTU anyway.
+ */
+
+ truesize = skb->len;
+ copied = min(len, truesize);
+
+ memcpy_tofs(to, skb->data, copied); /* We can't use skb_copy_datagram here */
+
+ /*
+ * Copy the address.
+ */
+
+ if (saddr)
+ {
+ saddr->sa_family = skb->dev->type;
+ memcpy(saddr->sa_data,skb->dev->name, 14);
+ }
+
+ /*
+ * Free or return the buffer as appropriate. Again this hides all the
+ * races and re-entrancy issues from us.
+ */
+
+ skb_free_datagram(skb);
+
+ /*
+ * We are done.
+ */
+
+ release_sock(sk);
+ return(truesize);
+}
+
+
+/*
+ * A packet read can succeed and is just the same as a recvfrom but without the
+ * addresses being recorded.
+ */
+
+int packet_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags)
+{
+ return(packet_recvfrom(sk, buff, len, noblock, flags, NULL, NULL));
+}
+
+
+/*
+ *	This structure declares, to the lower layer socket subsystem (which is
+ *	currently, and incorrectly, embedded in the IP code), how to behave.
+ *	This interface needs a lot of work and will change.
+ */
+
+struct proto packet_prot =
+{
+ sock_wmalloc,
+ sock_rmalloc,
+ sock_wfree,
+ sock_rfree,
+ sock_rspace,
+ sock_wspace,
+ packet_close,
+ packet_read,
+ packet_write,
+ packet_sendto,
+ packet_recvfrom,
+ ip_build_header, /* Not actually used */
+ NULL,
+ NULL,
+ ip_queue_xmit, /* These two are not actually used */
+ ip_retransmit,
+ NULL,
+ NULL,
+ NULL,
+ datagram_select,
+ NULL,
+ packet_init,
+ NULL,
+ NULL, /* No set/get socket options */
+ NULL,
+ 128,
+ 0,
+ {NULL,},
+ "PACKET"
+};
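
The read side of this interface is easiest to see from user space. A minimal sketch (illustration only; it needs root, and ETH_P_IP is just an example protocol): each recvfrom() returns one complete frame, MAC header included, and the returned address carries the name of the device it arrived on, exactly as packet_recvfrom() fills it in.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_IP */

int main(void)
{
	unsigned char frame[2048];
	struct sockaddr from;
	socklen_t fromlen = sizeof(from);
	int fd = socket(AF_INET, SOCK_PACKET, htons(ETH_P_IP));
	int n;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* One complete frame per read, MAC header included;
	 * from.sa_data names the device it arrived on. */
	n = recvfrom(fd, frame, sizeof(frame), 0, &from, &fromlen);
	if (n < 0)
		perror("recvfrom");
	else
		printf("%d bytes from %s\n", n, from.sa_data);
	return 0;
}
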
diff --git a/net/inet/pe2.c b/net/inet/pe2.c
new file mode 100644
index 000000000..15f62b344
--- /dev/null
+++ b/net/inet/pe2.c
@@ -0,0 +1,34 @@
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include "datalink.h"
+#include <linux/mm.h>
+#include <linux/in.h>
+
+static void
+pEII_datalink_header(struct datalink_proto *dl,
+ struct sk_buff *skb, unsigned char *dest_node)
+{
+ struct device *dev = skb->dev;
+ unsigned long len = skb->len;
+ unsigned long hard_len = dev->hard_header_len;
+
+ dev->hard_header(skb->data, dev, ETH_P_IPX,
+ dest_node, NULL, len - hard_len, skb);
+ skb->h.raw = skb->data + hard_len;
+}
+
+struct datalink_proto *
+make_EII_client(void)
+{
+ struct datalink_proto *proto;
+
+ proto = (struct datalink_proto *) kmalloc(sizeof(*proto), GFP_ATOMIC);
+ if (proto != NULL) {
+ proto->type_len = 0;
+ proto->header_length = 0;
+ proto->datalink_header = pEII_datalink_header;
+ }
+
+ return proto;
+}
+
diff --git a/net/inet/proc.c b/net/inet/proc.c
new file mode 100644
index 000000000..c143924bd
--- /dev/null
+++ b/net/inet/proc.c
@@ -0,0 +1,222 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * This file implements the various access functions for the
+ * PROC file system. It is mainly used for debugging and
+ * statistics.
+ *
+ * Version: @(#)proc.c 1.0.5 05/27/93
+ *
+ * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de>
+ * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de>
+ * Erik Schoenfelder, <schoenfr@ibr.cs.tu-bs.de>
+ *
+ * Fixes:
+ * Alan Cox : UDP sockets show the rxqueue/txqueue
+ * using hint flag for the netinfo.
+ * Pauline Middelink : identd support
+ * Alan Cox : Make /proc safer.
+ * Erik Schoenfelder : /proc/net/snmp
+ * Alan Cox : Handle dead sockets properly.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/system.h>
+#include <linux/autoconf.h>
+#include <linux/sched.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/un.h>
+#include <linux/in.h>
+#include <linux/param.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "icmp.h"
+#include "protocol.h"
+#include "tcp.h"
+#include "udp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "raw.h"
+
+/*
+ * get__netinfo() returns the length of the string it built in the buffer.
+ *
+ * KNOWN BUGS
+ *  As in get_unix_netinfo, the buffer might be too small. If this
+ *  happens, get__netinfo() returns only part of the available information.
+ */
+static int
+get__netinfo(struct proto *pro, char *buffer, int format, char **start, off_t offset, int length)
+{
+ struct sock **s_array;
+ struct sock *sp;
+ int i;
+ int timer_active;
+ unsigned long dest, src;
+ unsigned short destp, srcp;
+ int len=0;
+ off_t pos=0;
+ off_t begin=0;
+
+ s_array = pro->sock_array;
+ len+=sprintf(buffer, "sl local_address rem_address st tx_queue rx_queue tr tm->when uid\n");
+/*
+ * This was very pretty, but it didn't work when a socket was destroyed at the wrong
+ * moment (e.g. a SYN_RECV socket getting a reset) or was destroyed by a memory timer.
+ * Instead of playing with timers we just concede defeat and cli().
+ */
+ for(i = 0; i < SOCK_ARRAY_SIZE; i++)
+ {
+ cli();
+ sp = s_array[i];
+ while(sp != NULL)
+ {
+ dest = sp->daddr;
+ src = sp->saddr;
+ destp = sp->dummy_th.dest;
+ srcp = sp->dummy_th.source;
+
+ /* Since we are Little Endian we need to swap the bytes :-( */
+ destp = ntohs(destp);
+ srcp = ntohs(srcp);
+ timer_active = del_timer(&sp->timer);
+ if (!timer_active)
+ sp->timer.expires = 0;
+ len+=sprintf(buffer+len, "%2d: %08lX:%04X %08lX:%04X %02X %08lX:%08lX %02X:%08lX %08X %d\n",
+ i, src, srcp, dest, destp, sp->state,
+ format==0?sp->write_seq-sp->rcv_ack_seq:sp->rmem_alloc,
+ format==0?sp->acked_seq-sp->copied_seq:sp->wmem_alloc,
+ timer_active, sp->timer.expires, (unsigned) sp->retransmits,
+ sp->socket?SOCK_INODE(sp->socket)->i_uid:0);
+ if (timer_active)
+ add_timer(&sp->timer);
+ /*
+ * All sockets with (port mod SOCK_ARRAY_SIZE) = i
+ * are kept in sock_array[i], so we must follow the
+ * 'next' link to get them all.
+ */
+ sp = sp->next;
+ pos=begin+len;
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+		sti();	/* We only turn interrupts back on for a moment, but because interrupt
+			   handlers just queue their work, anything built up here will clear
+			   before we jump back and cli() again, so it's not as bad as it looks. */
+ if(pos>offset+length)
+ break;
+ }
+ *start=buffer+(offset-begin);
+ len-=(offset-begin);
+ if(len>length)
+ len=length;
+ return len;
+}
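+
+/*
+ * A note on the offset/length/*start dance above: the caller asks for at
+ * most 'length' bytes starting at byte 'offset' of the virtual file, and we
+ * report through *start where in 'buffer' that window begins. Lines built
+ * entirely before the window are discarded (len=0, begin=pos) and building
+ * stops once we are past the window. A minimal sketch of the same idiom,
+ * with hypothetical names, for a generator that emits one line per item:
+ *
+ *	len = 0; pos = 0; begin = 0;
+ *	for (item = list; item != NULL; item = item->next)
+ *	{
+ *		len += sprintf(buffer + len, "one line per item\n");
+ *		pos = begin + len;
+ *		if (pos < offset)
+ *		{
+ *			len = 0;
+ *			begin = pos;
+ *		}
+ *		if (pos > offset + length)
+ *			break;
+ *	}
+ *	*start = buffer + (offset - begin);
+ *	len -= (offset - begin);
+ *	if (len > length)
+ *		len = length;
+ *	return len;
+ */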
+
+
+int tcp_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ return get__netinfo(&tcp_prot, buffer,0, start, offset, length);
+}
+
+
+int udp_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ return get__netinfo(&udp_prot, buffer,1, start, offset, length);
+}
+
+
+int raw_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ return get__netinfo(&raw_prot, buffer,1, start, offset, length);
+}
+
+
+/*
+ * Called from the PROCfs module. This outputs /proc/net/snmp.
+ */
+
+int snmp_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ extern struct tcp_mib tcp_statistics;
+ extern struct udp_mib udp_statistics;
+ int len;
+/*
+ extern unsigned long tcp_rx_miss, tcp_rx_hit1,tcp_rx_hit2;
+*/
+
+ len = sprintf (buffer,
+ "Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates\n"
+ "Ip: %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+ ip_statistics.IpForwarding, ip_statistics.IpDefaultTTL,
+ ip_statistics.IpInReceives, ip_statistics.IpInHdrErrors,
+ ip_statistics.IpInAddrErrors, ip_statistics.IpForwDatagrams,
+ ip_statistics.IpInUnknownProtos, ip_statistics.IpInDiscards,
+ ip_statistics.IpInDelivers, ip_statistics.IpOutRequests,
+ ip_statistics.IpOutDiscards, ip_statistics.IpOutNoRoutes,
+ ip_statistics.IpReasmTimeout, ip_statistics.IpReasmReqds,
+ ip_statistics.IpReasmOKs, ip_statistics.IpReasmFails,
+ ip_statistics.IpFragOKs, ip_statistics.IpFragFails,
+ ip_statistics.IpFragCreates);
+
+ len += sprintf (buffer + len,
+ "Icmp: InMsgs InErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps\n"
+ "Icmp: %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+ icmp_statistics.IcmpInMsgs, icmp_statistics.IcmpInErrors,
+ icmp_statistics.IcmpInDestUnreachs, icmp_statistics.IcmpInTimeExcds,
+ icmp_statistics.IcmpInParmProbs, icmp_statistics.IcmpInSrcQuenchs,
+ icmp_statistics.IcmpInRedirects, icmp_statistics.IcmpInEchos,
+ icmp_statistics.IcmpInEchoReps, icmp_statistics.IcmpInTimestamps,
+ icmp_statistics.IcmpInTimestampReps, icmp_statistics.IcmpInAddrMasks,
+ icmp_statistics.IcmpInAddrMaskReps, icmp_statistics.IcmpOutMsgs,
+ icmp_statistics.IcmpOutErrors, icmp_statistics.IcmpOutDestUnreachs,
+ icmp_statistics.IcmpOutTimeExcds, icmp_statistics.IcmpOutParmProbs,
+ icmp_statistics.IcmpOutSrcQuenchs, icmp_statistics.IcmpOutRedirects,
+ icmp_statistics.IcmpOutEchos, icmp_statistics.IcmpOutEchoReps,
+ icmp_statistics.IcmpOutTimestamps, icmp_statistics.IcmpOutTimestampReps,
+ icmp_statistics.IcmpOutAddrMasks, icmp_statistics.IcmpOutAddrMaskReps);
+
+ len += sprintf (buffer + len,
+ "Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs\n"
+ "Tcp: %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+ tcp_statistics.TcpRtoAlgorithm, tcp_statistics.TcpRtoMin,
+ tcp_statistics.TcpRtoMax, tcp_statistics.TcpMaxConn,
+ tcp_statistics.TcpActiveOpens, tcp_statistics.TcpPassiveOpens,
+ tcp_statistics.TcpAttemptFails, tcp_statistics.TcpEstabResets,
+ tcp_statistics.TcpCurrEstab, tcp_statistics.TcpInSegs,
+ tcp_statistics.TcpOutSegs, tcp_statistics.TcpRetransSegs);
+
+ len += sprintf (buffer + len,
+ "Udp: InDatagrams NoPorts InErrors OutDatagrams\nUdp: %lu %lu %lu %lu\n",
+ udp_statistics.UdpInDatagrams, udp_statistics.UdpNoPorts,
+ udp_statistics.UdpInErrors, udp_statistics.UdpOutDatagrams);
+/*
+ len += sprintf( buffer + len,
+ "TCP fast path RX: H2: %ul H1: %ul L: %ul\n",
+ tcp_rx_hit2,tcp_rx_hit1,tcp_rx_miss);
+*/
+
+ if (offset >= len)
+ {
+ *start = buffer;
+ return 0;
+ }
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ return len;
+}
+
diff --git a/net/inet/protocol.c b/net/inet/protocol.c
new file mode 100644
index 000000000..57e552ea7
--- /dev/null
+++ b/net/inet/protocol.c
@@ -0,0 +1,159 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * INET protocol dispatch tables.
+ *
+ * Version: @(#)protocol.c 1.0.5 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : Ahah! udp icmp errors don't work because
+ * udp_err is never called!
+ * Alan Cox : Added new fields for init and ready for
+ * proper fragmentation (_NO_ 4K limits!)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "icmp.h"
+#include "udp.h"
+
+
+static struct inet_protocol tcp_protocol = {
+ tcp_rcv, /* TCP handler */
+ NULL, /* No fragment handler (and won't be for a long time) */
+ tcp_err, /* TCP error control */
+ NULL, /* next */
+ IPPROTO_TCP, /* protocol ID */
+ 0, /* copy */
+ NULL, /* data */
+ "TCP" /* name */
+};
+
+
+static struct inet_protocol udp_protocol = {
+ udp_rcv, /* UDP handler */
+ NULL, /* Will be UDP fraglist handler */
+ udp_err, /* UDP error control */
+ &tcp_protocol, /* next */
+ IPPROTO_UDP, /* protocol ID */
+ 0, /* copy */
+ NULL, /* data */
+ "UDP" /* name */
+};
+
+
+static struct inet_protocol icmp_protocol = {
+ icmp_rcv, /* ICMP handler */
+ NULL, /* ICMP never fragments anyway */
+ NULL, /* ICMP error control */
+ &udp_protocol, /* next */
+ IPPROTO_ICMP, /* protocol ID */
+ 0, /* copy */
+ NULL, /* data */
+ "ICMP" /* name */
+};
+
+
+struct inet_protocol *inet_protocol_base = &icmp_protocol;
+struct inet_protocol *inet_protos[MAX_INET_PROTOS] = {
+ NULL
+};
+
+
+struct inet_protocol *
+inet_get_protocol(unsigned char prot)
+{
+ unsigned char hash;
+ struct inet_protocol *p;
+
+ hash = prot & (MAX_INET_PROTOS - 1);
+ for (p = inet_protos[hash] ; p != NULL; p=p->next) {
+ if (p->protocol == prot) return((struct inet_protocol *) p);
+ }
+ return(NULL);
+}
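+
+/*
+ * Worked example of the bucket lookup above: MAX_INET_PROTOS is 32, so the
+ * mask is 0x1f. IPPROTO_TCP (6) hashes to bucket 6 and IPPROTO_UDP (17) to
+ * bucket 17, while a hypothetical protocol number 38 would share bucket 6
+ * with TCP (38 & 0x1f == 6). That is why the chain walk still compares the
+ * full protocol number, and why inet_add_protocol() below maintains a copy
+ * bit for duplicate registrations of the same protocol.
+ */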
+
+
+void
+inet_add_protocol(struct inet_protocol *prot)
+{
+ unsigned char hash;
+ struct inet_protocol *p2;
+
+ hash = prot->protocol & (MAX_INET_PROTOS - 1);
+ prot ->next = inet_protos[hash];
+ inet_protos[hash] = prot;
+ prot->copy = 0;
+
+ /* Set the copy bit if we need to. */
+ p2 = (struct inet_protocol *) prot->next;
+ while(p2 != NULL) {
+ if (p2->protocol == prot->protocol) {
+ prot->copy = 1;
+ break;
+ }
+		p2 = (struct inet_protocol *) p2->next;	/* advance down the chain */
+ }
+}
+
+
+int
+inet_del_protocol(struct inet_protocol *prot)
+{
+ struct inet_protocol *p;
+ struct inet_protocol *lp = NULL;
+ unsigned char hash;
+
+ hash = prot->protocol & (MAX_INET_PROTOS - 1);
+ if (prot == inet_protos[hash]) {
+ inet_protos[hash] = (struct inet_protocol *) inet_protos[hash]->next;
+ return(0);
+ }
+
+ p = (struct inet_protocol *) inet_protos[hash];
+ while(p != NULL) {
+ /*
+		 * If the protocol being deleted is the last one on the
+		 * list with this protocol number, we may need to reset
+		 * someone's copy bit.
+ */
+ if (p->next != NULL && p->next == prot) {
+ /*
+ * if we are the last one with this protocol and
+ * there is a previous one, reset its copy bit.
+ */
+ if (p->copy == 0 && lp != NULL) lp->copy = 0;
+ p->next = prot->next;
+ return(0);
+ }
+
+ if (p->next != NULL && p->next->protocol == prot->protocol) {
+ lp = p;
+ }
+
+ p = (struct inet_protocol *) p->next;
+ }
+ return(-1);
+}
diff --git a/net/inet/protocol.h b/net/inet/protocol.h
new file mode 100644
index 000000000..3e0b6fb3c
--- /dev/null
+++ b/net/inet/protocol.h
@@ -0,0 +1,59 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the protocol dispatcher.
+ *
+ * Version: @(#)protocol.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Changes:
+ * Alan Cox : Added a name field and a frag handler
+ * field for later.
+ */
+
+#ifndef _PROTOCOL_H
+#define _PROTOCOL_H
+
+
+#define MAX_INET_PROTOS 32 /* Must be a power of 2 */
+
+
+/* This is used to register protocols. */
+struct inet_protocol {
+ int (*handler)(struct sk_buff *skb, struct device *dev,
+ struct options *opt, unsigned long daddr,
+ unsigned short len, unsigned long saddr,
+ int redo, struct inet_protocol *protocol);
+ int (*frag_handler)(struct sk_buff *skb, struct device *dev,
+ struct options *opt, unsigned long daddr,
+ unsigned short len, unsigned long saddr,
+ int redo, struct inet_protocol *protocol);
+ void (*err_handler)(int err, unsigned char *buff,
+ unsigned long daddr,
+ unsigned long saddr,
+ struct inet_protocol *protocol);
+ struct inet_protocol *next;
+ unsigned char protocol;
+ unsigned char copy:1;
+ void *data;
+ char *name;
+};
+
+
+extern struct inet_protocol *inet_protocol_base;
+extern struct inet_protocol *inet_protos[MAX_INET_PROTOS];
+
+
+extern void inet_add_protocol(struct inet_protocol *prot);
+extern int inet_del_protocol(struct inet_protocol *prot);
+
+
+#endif /* _PROTOCOL_H */
diff --git a/net/inet/rarp.c b/net/inet/rarp.c
new file mode 100644
index 000000000..94db10cf8
--- /dev/null
+++ b/net/inet/rarp.c
@@ -0,0 +1,490 @@
+/* linux/net/inet/rarp.c
+ *
+ * Copyright (C) 1994 by Ross Martin
+ * Based on linux/net/inet/arp.c, Copyright (C) 1994 by Florian La Roche
+ *
+ * This module implements the Reverse Address Resolution Protocol
+ * (RARP, RFC 903), which is used to convert low level addresses such
+ * as ethernet addresses into high level addresses such as IP addresses.
+ * The most common use of RARP is as a means for a diskless workstation
+ * to discover its IP address during a network boot.
+ *
+ **
+ *** WARNING:::::::::::::::::::::::::::::::::WARNING
+ ****
+ *****	SUN machines seem determined to boot solely from the machine that
+ ****	answered their RARP query. NEVER add a SUN to your RARP table
+ ***	unless you have everything else needed to boot that box from here.
+ **
+ *
+ * Currently, only ethernet address -> IP address is likely to work.
+ * (Is RARP ever used for anything else?)
+ *
+ * This code is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/config.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <stdarg.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "ip.h"
+#include "route.h"
+#include "protocol.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "arp.h"
+#include "rarp.h"
+#ifdef CONFIG_AX25
+#include "ax25.h"
+#endif
+
+#ifdef CONFIG_INET_RARP
+
+/*
+ * This structure defines the RARP mapping cache. As long as we make
+ * changes in this structure, we keep interrupts off.
+ */
+
+struct rarp_table
+{
+ struct rarp_table *next; /* Linked entry list */
+ unsigned long ip; /* ip address of entry */
+ unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
+ unsigned char hlen; /* Length of hardware address */
+ unsigned char htype; /* Type of hardware in use */
+ struct device *dev; /* Device the entry is tied to */
+};
+
+struct rarp_table *rarp_tables = NULL;
+
+
+static struct packet_type rarp_packet_type =
+{
+ 0, /* Should be: __constant_htons(ETH_P_RARP) - but this _doesn't_ come out constant! */
+ 0, /* copy */
+ rarp_rcv,
+ NULL,
+ NULL
+};
+
+static int initflag = 1;
+
+/*
+ * Called once when data first added to rarp cache with ioctl.
+ */
+
+static void rarp_init (void)
+{
+ /* Register the packet type */
+ rarp_packet_type.type=htons(ETH_P_RARP);
+ dev_add_pack(&rarp_packet_type);
+}
+
+/*
+ * Release the memory for this entry.
+ */
+
+static inline void rarp_release_entry(struct rarp_table *entry)
+{
+ kfree_s(entry, sizeof(struct rarp_table));
+ return;
+}
+
+/*
+ * Delete a RARP mapping entry in the cache.
+ */
+
+static void rarp_destroy(unsigned long ip_addr)
+{
+ struct rarp_table *entry;
+ struct rarp_table **pentry;
+
+ cli();
+ pentry = &rarp_tables;
+ while ((entry = *pentry) != NULL)
+ {
+ if (entry->ip == ip_addr)
+ {
+ *pentry = entry->next;
+ sti();
+ rarp_release_entry(entry);
+ return;
+ }
+ pentry = &entry->next;
+ }
+ sti();
+}
+
+
+/*
+ * Receive an arp request by the device layer. Maybe it should be
+ * rewritten to use the incoming packet for the reply. The current
+ * "overhead" time isn't that high...
+ */
+
+int rarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
+{
+/*
+ * We shouldn't use this type conversion. Check later.
+ */
+ struct arphdr *rarp = (struct arphdr *)skb->h.raw;
+ unsigned char *rarp_ptr = (unsigned char *)(rarp+1);
+ struct rarp_table *entry;
+ long sip,tip;
+ unsigned char *sha,*tha; /* s for "source", t for "target" */
+
+/*
+ *	If this test doesn't pass, it's not IP, or we should ignore it anyway.
+ */
+
+ if (rarp->ar_hln != dev->addr_len || dev->type != ntohs(rarp->ar_hrd)
+ || dev->flags&IFF_NOARP)
+ {
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+
+/*
+ * If it's not a RARP request, delete it.
+ */
+ if (rarp->ar_op != htons(ARPOP_RREQUEST))
+ {
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+
+/*
+ * For now we will only deal with IP addresses.
+ */
+
+ if (
+#ifdef CONFIG_AX25
+ (rarp->ar_pro != htons(AX25_P_IP) && dev->type == ARPHRD_AX25) ||
+#endif
+ (rarp->ar_pro != htons(ETH_P_IP) && dev->type != ARPHRD_AX25)
+ || rarp->ar_pln != 4)
+ {
+ /*
+ * This packet is not for us. Remove it.
+ */
+ kfree_skb(skb, FREE_READ);
+ return 0;
+	}
+
+/*
+ * Extract variable width fields
+ */
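+
+/*
+ *	On the wire the RARP payload after the fixed arphdr is laid out as
+ *	sender hardware address (ar_hln bytes), sender IP (4 bytes), target
+ *	hardware address (ar_hln bytes), target IP (4 bytes); the pointer
+ *	arithmetic below simply steps through that layout.
+ */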
+
+ sha=rarp_ptr;
+ rarp_ptr+=dev->addr_len;
+ memcpy(&sip,rarp_ptr,4);
+ rarp_ptr+=4;
+ tha=rarp_ptr;
+ rarp_ptr+=dev->addr_len;
+ memcpy(&tip,rarp_ptr,4);
+
+/*
+ * Process entry
+ */
+
+ cli();
+ for (entry = rarp_tables; entry != NULL; entry = entry->next)
+ if (!memcmp(entry->ha, sha, rarp->ar_hln))
+ break;
+
+ if (entry != NULL)
+ {
+ sip=entry->ip;
+ sti();
+
+ arp_send(ARPOP_RREPLY, ETH_P_RARP, sip, dev, dev->pa_addr, sha,
+ dev->dev_addr);
+ }
+ else
+ sti();
+
+ kfree_skb(skb, FREE_READ);
+ return 0;
+}
+
+
+/*
+ * Set (create) a RARP cache entry.
+ */
+
+static int rarp_req_set(struct arpreq *req)
+{
+ struct arpreq r;
+ struct rarp_table *entry;
+ struct sockaddr_in *si;
+ int htype, hlen;
+ unsigned long ip;
+ struct rtable *rt;
+
+ memcpy_fromfs(&r, req, sizeof(r));
+
+ /*
+ * We only understand about IP addresses...
+ */
+
+ if (r.arp_pa.sa_family != AF_INET)
+ return -EPFNOSUPPORT;
+
+ switch (r.arp_ha.sa_family)
+ {
+ case ARPHRD_ETHER:
+ htype = ARPHRD_ETHER;
+ hlen = ETH_ALEN;
+ break;
+#ifdef CONFIG_AX25
+ case ARPHRD_AX25:
+ htype = ARPHRD_AX25;
+ hlen = 7;
+ break;
+#endif
+ default:
+ return -EPFNOSUPPORT;
+ }
+
+ si = (struct sockaddr_in *) &r.arp_pa;
+ ip = si->sin_addr.s_addr;
+ if (ip == 0)
+ {
+ printk("RARP: SETRARP: requested PA is 0.0.0.0 !\n");
+ return -EINVAL;
+ }
+
+/*
+ * Is it reachable directly ?
+ */
+
+ rt = ip_rt_route(ip, NULL, NULL);
+ if (rt == NULL)
+ return -ENETUNREACH;
+
+/*
+ * Is there an existing entry for this address? Find out...
+ */
+
+ cli();
+ for (entry = rarp_tables; entry != NULL; entry = entry->next)
+ if (entry->ip == ip)
+ break;
+
+/*
+ * If no entry was found, create a new one.
+ */
+
+ if (entry == NULL)
+ {
+ entry = (struct rarp_table *) kmalloc(sizeof(struct rarp_table),
+ GFP_ATOMIC);
+ if (entry == NULL)
+ {
+ sti();
+ return -ENOMEM;
+ }
+ if(initflag)
+ {
+ rarp_init();
+ initflag=0;
+ }
+
+ entry->next = rarp_tables;
+ rarp_tables = entry;
+ }
+
+ entry->ip = ip;
+ entry->hlen = hlen;
+ entry->htype = htype;
+ memcpy(&entry->ha, &r.arp_ha.sa_data, hlen);
+ entry->dev = rt->rt_dev;
+
+ sti();
+
+ return 0;
+}
+
+
+/*
+ * Get a RARP cache entry.
+ */
+
+static int rarp_req_get(struct arpreq *req)
+{
+ struct arpreq r;
+ struct rarp_table *entry;
+ struct sockaddr_in *si;
+ unsigned long ip;
+
+/*
+ * We only understand about IP addresses...
+ */
+
+ memcpy_fromfs(&r, req, sizeof(r));
+
+ if (r.arp_pa.sa_family != AF_INET)
+ return -EPFNOSUPPORT;
+
+/*
+ * Is there an existing entry for this address?
+ */
+
+ si = (struct sockaddr_in *) &r.arp_pa;
+ ip = si->sin_addr.s_addr;
+
+ cli();
+ for (entry = rarp_tables; entry != NULL; entry = entry->next)
+ if (entry->ip == ip)
+ break;
+
+ if (entry == NULL)
+ {
+ sti();
+ return -ENXIO;
+ }
+
+/*
+ * We found it; copy into structure.
+ */
+
+ memcpy(r.arp_ha.sa_data, &entry->ha, entry->hlen);
+ r.arp_ha.sa_family = entry->htype;
+ sti();
+
+/*
+ * Copy the information back
+ */
+
+ memcpy_tofs(req, &r, sizeof(r));
+ return 0;
+}
+
+
+/*
+ * Handle a RARP layer I/O control request.
+ */
+
+int rarp_ioctl(unsigned int cmd, void *arg)
+{
+ struct arpreq r;
+ struct sockaddr_in *si;
+ int err;
+
+ switch(cmd)
+ {
+ case SIOCDRARP:
+ if (!suser())
+ return -EPERM;
+ err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
+ if(err)
+ return err;
+ memcpy_fromfs(&r, arg, sizeof(r));
+ if (r.arp_pa.sa_family != AF_INET)
+ return -EPFNOSUPPORT;
+ si = (struct sockaddr_in *) &r.arp_pa;
+ rarp_destroy(si->sin_addr.s_addr);
+ return 0;
+
+ case SIOCGRARP:
+ err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
+ if(err)
+ return err;
+ return rarp_req_get((struct arpreq *)arg);
+ case SIOCSRARP:
+ if (!suser())
+ return -EPERM;
+ err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
+ if(err)
+ return err;
+ return rarp_req_set((struct arpreq *)arg);
+ default:
+ return -EINVAL;
+ }
+
+ /*NOTREACHED*/
+ return 0;
+}
+
+int rarp_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+ struct rarp_table *entry;
+ char ipbuffer[20];
+ unsigned long netip;
+ if(initflag)
+ {
+ size = sprintf(buffer,"RARP disabled until entries added to cache.\n");
+ pos+=size;
+ len+=size;
+ }
+ else
+ {
+ size = sprintf(buffer,
+ "IP address HW type HW address\n");
+ pos+=size;
+ len+=size;
+
+ cli();
+ for(entry=rarp_tables; entry!=NULL; entry=entry->next)
+ {
+      netip=htonl(entry->ip);          /* convert to host order for printing */
+ sprintf(ipbuffer,"%d.%d.%d.%d",
+ (unsigned int)(netip>>24)&255,
+ (unsigned int)(netip>>16)&255,
+ (unsigned int)(netip>>8)&255,
+ (unsigned int)(netip)&255);
+
+ size = sprintf(buffer+len,
+ "%-17s%-20s%02x:%02x:%02x:%02x:%02x:%02x\n",
+ ipbuffer,
+ "10Mbps Ethernet",
+ (unsigned int)entry->ha[0],
+ (unsigned int)entry->ha[1],
+ (unsigned int)entry->ha[2],
+ (unsigned int)entry->ha[3],
+ (unsigned int)entry->ha[4],
+ (unsigned int)entry->ha[5]);
+
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+ sti();
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+
+#endif
diff --git a/net/inet/rarp.h b/net/inet/rarp.h
new file mode 100644
index 000000000..02ee7784f
--- /dev/null
+++ b/net/inet/rarp.h
@@ -0,0 +1,14 @@
+/* linux/net/inet/rarp.h */
+#ifndef _RARP_H
+#define _RARP_H
+
+extern int rarp_ioctl(unsigned int cmd, void *arg);
+extern int rarp_rcv(struct sk_buff *skb,
+ struct device *dev,
+ struct packet_type *pt);
+extern int rarp_get_info(char *buffer,
+ char **start,
+ off_t offset,
+ int length);
+#endif /* _RARP_H */
+
diff --git a/net/inet/raw.c b/net/inet/raw.c
new file mode 100644
index 000000000..b79c1da3c
--- /dev/null
+++ b/net/inet/raw.c
@@ -0,0 +1,354 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * RAW - implementation of IP "raw" sockets.
+ *
+ * Version: @(#)raw.c 1.0.4 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : verify_area() fixed up
+ * Alan Cox : ICMP error handling
+ * Alan Cox : EMSGSIZE if you send too big a packet
+ * Alan Cox : Now uses generic datagrams and shared skbuff
+ * library. No more peek crashes, no more backlogs
+ * Alan Cox : Checks sk->broadcast.
+ * Alan Cox : Uses skb_free_datagram/skb_copy_datagram
+ * Alan Cox : Raw passes ip options too
+ * Alan Cox : Setsocketopt added
+ * Alan Cox : Fixed error return for broadcasts
+ * Alan Cox : Removed wake_up calls
+ * Alan Cox : Use ttl/tos
+ * Alan Cox : Cleaned up old debugging
+ * Alan Cox : Use new kernel side addresses
+ * Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "icmp.h"
+#include "udp.h"
+
+
+static inline unsigned long min(unsigned long a, unsigned long b)
+{
+ if (a < b)
+ return(a);
+ return(b);
+}
+
+
+/* raw_err gets called by the icmp module. */
+void raw_err (int err, unsigned char *header, unsigned long daddr,
+ unsigned long saddr, struct inet_protocol *protocol)
+{
+ struct sock *sk;
+
+ if (protocol == NULL)
+ return;
+ sk = (struct sock *) protocol->data;
+ if (sk == NULL)
+ return;
+
+ /* This is meaningless in raw sockets. */
+	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
+ {
+ if (sk->cong_window > 1) sk->cong_window = sk->cong_window/2;
+ return;
+ }
+
+ sk->err = icmp_err_convert[err & 0xff].errno;
+ sk->error_report(sk);
+
+ return;
+}
+
+
+/*
+ *	This should be the easiest of all: all we do is
+ * copy it into a buffer.
+ */
+
+int raw_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
+ unsigned long daddr, unsigned short len, unsigned long saddr,
+ int redo, struct inet_protocol *protocol)
+{
+ struct sock *sk;
+
+ if (skb == NULL)
+ return(0);
+
+ if (protocol == NULL)
+ {
+ kfree_skb(skb, FREE_READ);
+ return(0);
+ }
+
+ sk = (struct sock *) protocol->data;
+ if (sk == NULL)
+ {
+ kfree_skb(skb, FREE_READ);
+ return(0);
+ }
+
+ /* Now we need to copy this into memory. */
+
+ skb->sk = sk;
+ skb->len = len + skb->ip_hdr->ihl*sizeof(long);
+ skb->h.raw = (unsigned char *) skb->ip_hdr;
+ skb->dev = dev;
+ skb->saddr = daddr;
+ skb->daddr = saddr;
+
+ /* Charge it to the socket. */
+
+ if(sock_queue_rcv_skb(sk,skb)<0)
+ {
+ ip_statistics.IpInDiscards++;
+ skb->sk=NULL;
+ kfree_skb(skb, FREE_READ);
+ return(0);
+ }
+
+ ip_statistics.IpInDelivers++;
+ release_sock(sk);
+ return(0);
+}
+
+/*
+ * Send a RAW IP packet.
+ */
+
+static int raw_sendto(struct sock *sk, unsigned char *from,
+ int len, int noblock, unsigned flags, struct sockaddr_in *usin, int addr_len)
+{
+ struct sk_buff *skb;
+ struct device *dev=NULL;
+ struct sockaddr_in sin;
+ int tmp;
+ int err;
+
+ /*
+ * Check the flags. Only MSG_DONTROUTE is permitted.
+ */
+
+ if (flags & ~MSG_DONTROUTE)
+ return(-EINVAL);
+ /*
+ * Get and verify the address.
+ */
+
+ if (usin)
+ {
+ if (addr_len < sizeof(sin))
+ return(-EINVAL);
+ memcpy(&sin, usin, sizeof(sin));
+ if (sin.sin_family && sin.sin_family != AF_INET)
+ return(-EINVAL);
+ }
+ else
+ {
+ if (sk->state != TCP_ESTABLISHED)
+ return(-EINVAL);
+ sin.sin_family = AF_INET;
+ sin.sin_port = sk->protocol;
+ sin.sin_addr.s_addr = sk->daddr;
+ }
+ if (sin.sin_port == 0)
+ sin.sin_port = sk->protocol;
+
+ if (sin.sin_addr.s_addr == INADDR_ANY)
+ sin.sin_addr.s_addr = ip_my_addr();
+
+ if (sk->broadcast == 0 && ip_chk_addr(sin.sin_addr.s_addr)==IS_BROADCAST)
+ return -EACCES;
+
+ skb=sock_alloc_send_skb(sk, len+sk->prot->max_header, noblock, &err);
+ if(skb==NULL)
+ return err;
+
+ skb->sk = sk;
+ skb->free = 1;
+ skb->localroute = sk->localroute | (flags&MSG_DONTROUTE);
+
+ tmp = sk->prot->build_header(skb, sk->saddr,
+ sin.sin_addr.s_addr, &dev,
+ sk->protocol, sk->opt, skb->mem_len, sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0)
+ {
+ kfree_skb(skb,FREE_WRITE);
+ release_sock(sk);
+ return(tmp);
+ }
+
+ memcpy_fromfs(skb->data + tmp, from, len);
+
+ /*
+ * If we are using IPPROTO_RAW, we need to fill in the source address in
+ * the IP header
+ */
+
+ if(sk->protocol==IPPROTO_RAW)
+ {
+ unsigned char *buff;
+ struct iphdr *iph;
+
+ buff = skb->data;
+ buff += tmp;
+
+ iph = (struct iphdr *)buff;
+ iph->saddr = sk->saddr;
+ }
+
+ skb->len = tmp + len;
+
+ sk->prot->queue_xmit(sk, dev, skb, 1);
+ release_sock(sk);
+ return(len);
+}
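+
+/*
+ *	For illustration only: from user space an IPPROTO_RAW socket supplies
+ *	the whole IP header itself, roughly like the sketch below (user-space
+ *	code, error handling and most header fields omitted; the names are the
+ *	usual libc/kernel header ones and are assumptions here, not defined by
+ *	this file).
+ *
+ *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ *	struct { struct iphdr ip; char payload[8]; } pkt;
+ *	... fill in pkt.ip and pkt.payload ...
+ *	sendto(fd, &pkt, sizeof(pkt), 0, (struct sockaddr *)&dst, sizeof(dst));
+ *
+ *	The IPPROTO_RAW branch above then overwrites the source address in
+ *	that header with the socket's own address, so the caller need not
+ *	know it.
+ */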
+
+
+static int raw_write(struct sock *sk, unsigned char *buff, int len, int noblock,
+ unsigned flags)
+{
+ return(raw_sendto(sk, buff, len, noblock, flags, NULL, 0));
+}
+
+
+static void raw_close(struct sock *sk, int timeout)
+{
+ sk->inuse = 1;
+ sk->state = TCP_CLOSE;
+
+ inet_del_protocol((struct inet_protocol *)sk->pair);
+ kfree_s((void *)sk->pair, sizeof (struct inet_protocol));
+ sk->pair = NULL;
+ release_sock(sk);
+}
+
+
+static int raw_init(struct sock *sk)
+{
+ struct inet_protocol *p;
+
+ p = (struct inet_protocol *) kmalloc(sizeof (*p), GFP_KERNEL);
+ if (p == NULL)
+ return(-ENOMEM);
+
+ p->handler = raw_rcv;
+ p->protocol = sk->protocol;
+ p->data = (void *)sk;
+ p->err_handler = raw_err;
+ p->name="USER";
+ p->frag_handler = NULL; /* For now */
+ inet_add_protocol(p);
+
+ /* We need to remember this somewhere. */
+ sk->pair = (struct sock *)p;
+
+ return(0);
+}
+
+
+/*
+ *	This should be easy: if there is something there
+ * we return it, otherwise we block.
+ */
+
+int raw_recvfrom(struct sock *sk, unsigned char *to, int len,
+ int noblock, unsigned flags, struct sockaddr_in *sin,
+ int *addr_len)
+{
+ int copied=0;
+ struct sk_buff *skb;
+ int err;
+ int truesize;
+
+ if (sk->shutdown & RCV_SHUTDOWN)
+ return(0);
+
+ if (addr_len)
+ *addr_len=sizeof(*sin);
+
+ skb=skb_recv_datagram(sk,flags,noblock,&err);
+ if(skb==NULL)
+ return err;
+
+ truesize=skb->len;
+ copied = min(len, truesize);
+
+ skb_copy_datagram(skb, 0, to, copied);
+ sk->stamp=skb->stamp;
+
+ /* Copy the address. */
+ if (sin)
+ {
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = skb->daddr;
+ }
+ skb_free_datagram(skb);
+ release_sock(sk);
+ return (truesize); /* len not copied. BSD returns the true size of the message so you know a bit fell off! */
+}
+
+
+int raw_read (struct sock *sk, unsigned char *buff, int len, int noblock,unsigned flags)
+{
+ return(raw_recvfrom(sk, buff, len, noblock, flags, NULL, NULL));
+}
+
+
+struct proto raw_prot = {
+ sock_wmalloc,
+ sock_rmalloc,
+ sock_wfree,
+ sock_rfree,
+ sock_rspace,
+ sock_wspace,
+ raw_close,
+ raw_read,
+ raw_write,
+ raw_sendto,
+ raw_recvfrom,
+ ip_build_header,
+ udp_connect,
+ NULL,
+ ip_queue_xmit,
+ ip_retransmit,
+ NULL,
+ NULL,
+ raw_rcv,
+ datagram_select,
+ NULL,
+ raw_init,
+ NULL,
+ ip_setsockopt,
+ ip_getsockopt,
+ 128,
+ 0,
+ {NULL,},
+ "RAW"
+};
diff --git a/net/inet/raw.h b/net/inet/raw.h
new file mode 100644
index 000000000..80cb4b4bf
--- /dev/null
+++ b/net/inet/raw.h
@@ -0,0 +1,36 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the RAW-IP module.
+ *
+ * Version: @(#)raw.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _RAW_H
+#define _RAW_H
+
+
+extern struct proto raw_prot;
+
+
+extern void raw_err(int err, unsigned char *header, unsigned long daddr,
+ unsigned long saddr, struct inet_protocol *protocol);
+extern int raw_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, unsigned long daddr,
+ unsigned short len, unsigned long saddr,
+ int redo, struct inet_protocol *protocol);
+extern int raw_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int raw_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+
+#endif /* _RAW_H */
diff --git a/net/inet/route.c b/net/inet/route.c
new file mode 100644
index 000000000..58401d742
--- /dev/null
+++ b/net/inet/route.c
@@ -0,0 +1,657 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * ROUTE - implementation of the IP router.
+ *
+ * Version: @(#)route.c 1.0.14 05/31/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
+ *
+ * Fixes:
+ * Alan Cox : Verify area fixes.
+ * Alan Cox : cli() protects routing changes
+ * Rui Oliveira : ICMP routing table updates
+ * (rco@di.uminho.pt) Routing table insertion and update
+ * Linus Torvalds : Rewrote bits to be sensible
+ * Alan Cox : Added BSD route gw semantics
+ * Alan Cox : Super /proc >4K
+ * Alan Cox : MTU in route table
+ * Alan Cox : MSS actually. Also added the window
+ * clamper.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include "route.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "icmp.h"
+
+/*
+ * The routing table list
+ */
+
+static struct rtable *rt_base = NULL;
+
+/*
+ * Pointer to the loopback route
+ */
+
+static struct rtable *rt_loopback = NULL;
+
+/*
+ * Remove a routing table entry.
+ */
+
+static void rt_del(unsigned long dst)
+{
+ struct rtable *r, **rp;
+ unsigned long flags;
+
+ rp = &rt_base;
+
+ /*
+ * This must be done with interrupts off because we could take
+ * an ICMP_REDIRECT.
+ */
+
+ save_flags(flags);
+ cli();
+ while((r = *rp) != NULL)
+ {
+ if (r->rt_dst != dst)
+ {
+ rp = &r->rt_next;
+ continue;
+ }
+ *rp = r->rt_next;
+
+ /*
+ * If we delete the loopback route update its pointer.
+ */
+
+ if (rt_loopback == r)
+ rt_loopback = NULL;
+ kfree_s(r, sizeof(struct rtable));
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Remove all routing table entries for a device. This is called when
+ * a device is downed.
+ */
+
+void ip_rt_flush(struct device *dev)
+{
+ struct rtable *r;
+ struct rtable **rp;
+ unsigned long flags;
+
+ rp = &rt_base;
+ save_flags(flags);
+ cli();
+ while ((r = *rp) != NULL) {
+ if (r->rt_dev != dev) {
+ rp = &r->rt_next;
+ continue;
+ }
+ *rp = r->rt_next;
+ if (rt_loopback == r)
+ rt_loopback = NULL;
+ kfree_s(r, sizeof(struct rtable));
+ }
+ restore_flags(flags);
+}
+
+/*
+ *	Used by ip_rt_add() when we can't get the netmask any other way.
+ *
+ * If the lower byte or two are zero, we guess the mask based on the
+ * number of zero 8-bit net numbers, otherwise we use the "default"
+ * masks judging by the destination address and our device netmask.
+ */
+
+static inline unsigned long default_mask(unsigned long dst)
+{
+ dst = ntohl(dst);
+ if (IN_CLASSA(dst))
+ return htonl(IN_CLASSA_NET);
+ if (IN_CLASSB(dst))
+ return htonl(IN_CLASSB_NET);
+ return htonl(IN_CLASSC_NET);
+}
+
+
+/*
+ * If no mask is specified then generate a default entry.
+ */
+
+static unsigned long guess_mask(unsigned long dst, struct device * dev)
+{
+ unsigned long mask;
+
+ if (!dst)
+ return 0;
+ mask = default_mask(dst);
+ if ((dst ^ dev->pa_addr) & mask)
+ return mask;
+ return dev->pa_mask;
+}
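+
+/*
+ *	Worked example of the guessing above: a destination of 10.1.2.3 gets
+ *	the classful default 255.0.0.0 (class A), 128.42.1.1 gets 255.255.0.0
+ *	(class B) and 192.168.5.9 gets 255.255.255.0 (class C). If the
+ *	destination lies on the same classful network as the interface
+ *	address, the interface's own netmask (dev->pa_mask) is used instead,
+ *	so subnetted interfaces still get a sensible route.
+ */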
+
+
+/*
+ * Find the route entry through which our gateway will be reached
+ */
+
+static inline struct device * get_gw_dev(unsigned long gw)
+{
+ struct rtable * rt;
+
+ for (rt = rt_base ; ; rt = rt->rt_next)
+ {
+ if (!rt)
+ return NULL;
+ if ((gw ^ rt->rt_dst) & rt->rt_mask)
+ continue;
+ /*
+ * Gateways behind gateways are a no-no
+ */
+
+ if (rt->rt_flags & RTF_GATEWAY)
+ return NULL;
+ return rt->rt_dev;
+ }
+}
+
+/*
+ * Rewrote rt_add(), as the old one was weird - Linus
+ *
+ * This routine is used to update the IP routing table, either
+ * from the kernel (ICMP_REDIRECT) or via an ioctl call issued
+ * by the superuser.
+ */
+
+void ip_rt_add(short flags, unsigned long dst, unsigned long mask,
+ unsigned long gw, struct device *dev, unsigned short mtu, unsigned long window)
+{
+ struct rtable *r, *rt;
+ struct rtable **rp;
+ unsigned long cpuflags;
+
+ /*
+ * A host is a unique machine and has no network bits.
+ */
+
+ if (flags & RTF_HOST)
+ {
+ mask = 0xffffffff;
+ }
+
+ /*
+ * Calculate the network mask
+ */
+
+ else if (!mask)
+ {
+ if (!((dst ^ dev->pa_addr) & dev->pa_mask))
+ {
+ mask = dev->pa_mask;
+ flags &= ~RTF_GATEWAY;
+ if (flags & RTF_DYNAMIC)
+ {
+ /*printk("Dynamic route to my own net rejected\n");*/
+ return;
+ }
+ }
+ else
+ mask = guess_mask(dst, dev);
+ dst &= mask;
+ }
+
+ /*
+ * A gateway must be reachable and not a local address
+ */
+
+ if (gw == dev->pa_addr)
+ flags &= ~RTF_GATEWAY;
+
+ if (flags & RTF_GATEWAY)
+ {
+ /*
+ * Don't try to add a gateway we can't reach..
+ */
+
+ if (dev != get_gw_dev(gw))
+ return;
+
+ flags |= RTF_GATEWAY;
+ }
+ else
+ gw = 0;
+
+ /*
+ * Allocate an entry and fill it in.
+ */
+
+ rt = (struct rtable *) kmalloc(sizeof(struct rtable), GFP_ATOMIC);
+ if (rt == NULL)
+ {
+ return;
+ }
+ memset(rt, 0, sizeof(struct rtable));
+ rt->rt_flags = flags | RTF_UP;
+ rt->rt_dst = dst;
+ rt->rt_dev = dev;
+ rt->rt_gateway = gw;
+ rt->rt_mask = mask;
+ rt->rt_mss = dev->mtu - HEADER_SIZE;
+ rt->rt_window = 0; /* Default is no clamping */
+
+ /* Are the MSS/Window valid ? */
+
+ if(rt->rt_flags & RTF_MSS)
+ rt->rt_mss = mtu;
+
+ if(rt->rt_flags & RTF_WINDOW)
+ rt->rt_window = window;
+
+ /*
+	 * What we have to do is loop through the list until we find
+	 * the first entry whose mask is more general (less specific)
+	 * than the one in rt. Then we can put rt in right before it.
+	 * Interrupts must be off for this process.
+ */
+
+ save_flags(cpuflags);
+ cli();
+
+ /*
+ * Remove old route if we are getting a duplicate.
+ */
+
+ rp = &rt_base;
+ while ((r = *rp) != NULL)
+ {
+ if (r->rt_dst != dst)
+ {
+ rp = &r->rt_next;
+ continue;
+ }
+ *rp = r->rt_next;
+ if (rt_loopback == r)
+ rt_loopback = NULL;
+ kfree_s(r, sizeof(struct rtable));
+ }
+
+ /*
+ * Add the new route
+ */
+
+ rp = &rt_base;
+ while ((r = *rp) != NULL) {
+ if ((r->rt_mask & mask) != mask)
+ break;
+ rp = &r->rt_next;
+ }
+ rt->rt_next = r;
+ *rp = rt;
+
+ /*
+ * Update the loopback route
+ */
+
+ if ((rt->rt_dev->flags & IFF_LOOPBACK) && !rt_loopback)
+ rt_loopback = rt;
+
+ /*
+ * Restore the interrupts and return
+ */
+
+ restore_flags(cpuflags);
+ return;
+}
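+
+/*
+ *	Worked example of the ordering above: host routes (mask
+ *	255.255.255.255) end up before network routes (e.g. mask
+ *	255.255.255.0), which in turn end up before a default route (mask
+ *	0.0.0.0), because the insertion scan stops at the first entry whose
+ *	mask does not contain every bit of the new mask. ip_rt_route() can
+ *	therefore take the first matching entry and know it is also the most
+ *	specific one.
+ */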
+
+
+/*
+ * Check if a mask is acceptable.
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
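+
+/*
+ *	Worked example of the test above: the mask is complemented first, so
+ *	a contiguous mask such as 255.255.240.0 leaves ~mask = 0x00000fff (in
+ *	host order), and 0x00000fff & 0x00001000 == 0, so it is accepted. A
+ *	broken mask like 255.0.255.0 leaves ~mask = 0x00ff00ff, and
+ *	0x00ff00ff & 0x00ff0100 is non-zero, so it is rejected. The first
+ *	test rejects any mask under which the destination address still has
+ *	host bits set.
+ */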
+
+/*
+ * Process a route add request from the user
+ */
+
+static int rt_new(struct rtentry *r)
+{
+ int err;
+ char * devname;
+ struct device * dev = NULL;
+ unsigned long flags, daddr, mask, gw;
+
+ /*
+ * If a device is specified find it.
+ */
+
+ if ((devname = r->rt_dev) != NULL)
+ {
+ err = getname(devname, &devname);
+ if (err)
+ return err;
+ dev = dev_get(devname);
+ putname(devname);
+ if (!dev)
+ return -EINVAL;
+ }
+
+ /*
+	 * If the destination address isn't INET, don't allow it
+ */
+
+ if (r->rt_dst.sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ /*
+ * Make local copies of the important bits
+ */
+
+ flags = r->rt_flags;
+ daddr = ((struct sockaddr_in *) &r->rt_dst)->sin_addr.s_addr;
+ mask = ((struct sockaddr_in *) &r->rt_genmask)->sin_addr.s_addr;
+ gw = ((struct sockaddr_in *) &r->rt_gateway)->sin_addr.s_addr;
+
+
+ /*
+ * BSD emulation: Permits route add someroute gw one-of-my-addresses
+ * to indicate which iface. Not as clean as the nice Linux dev technique
+ * but people keep using it...
+ */
+
+ if (!dev && (flags & RTF_GATEWAY))
+ {
+ struct device *dev2;
+ for (dev2 = dev_base ; dev2 != NULL ; dev2 = dev2->next)
+ {
+ if ((dev2->flags & IFF_UP) && dev2->pa_addr == gw)
+ {
+ flags &= ~RTF_GATEWAY;
+ dev = dev2;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Ignore faulty masks
+ */
+
+ if (bad_mask(mask, daddr))
+ mask = 0;
+
+ /*
+ * Set the mask to nothing for host routes.
+ */
+
+ if (flags & RTF_HOST)
+ mask = 0xffffffff;
+ else if (mask && r->rt_genmask.sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ /*
+ * You can only gateway IP via IP..
+ */
+
+ if (flags & RTF_GATEWAY)
+ {
+ if (r->rt_gateway.sa_family != AF_INET)
+ return -EAFNOSUPPORT;
+ if (!dev)
+ dev = get_gw_dev(gw);
+ }
+ else if (!dev)
+ dev = ip_dev_check(daddr);
+
+ /*
+ * Unknown device.
+ */
+
+ if (dev == NULL)
+ return -ENETUNREACH;
+
+ /*
+ * Add the route
+ */
+
+ ip_rt_add(flags, daddr, mask, gw, dev, r->rt_mss, r->rt_window);
+ return 0;
+}
+
+
+/*
+ * Remove a route, as requested by the user.
+ */
+
+static int rt_kill(struct rtentry *r)
+{
+ struct sockaddr_in *trg;
+
+ trg = (struct sockaddr_in *) &r->rt_dst;
+ rt_del(trg->sin_addr.s_addr);
+ return 0;
+}
+
+
+/*
+ * Called from the PROCfs module. This outputs /proc/net/route.
+ */
+
+int rt_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ struct rtable *r;
+ int len=0;
+ off_t pos=0;
+ off_t begin=0;
+ int size;
+
+ len += sprintf(buffer,
+ "Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\n");
+ pos=len;
+
+ /*
+ * This isn't quite right -- r->rt_dst is a struct!
+ */
+
+ for (r = rt_base; r != NULL; r = r->rt_next)
+ {
+ size = sprintf(buffer+len, "%s\t%08lX\t%08lX\t%02X\t%d\t%lu\t%d\t%08lX\t%d\t%lu\n",
+ r->rt_dev->name, r->rt_dst, r->rt_gateway,
+ r->rt_flags, r->rt_refcnt, r->rt_use, r->rt_metric,
+ r->rt_mask, (int)r->rt_mss, r->rt_window);
+ len+=size;
+ pos+=size;
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin);
+ len-=(offset-begin);
+ if(len>length)
+ len=length;
+ return len;
+}
+
+/*
+ * This is hackish, but results in better code. Use "-S" to see why.
+ */
+
+#define early_out ({ goto no_route; 1; })
+
+/*
+ * Route a packet. This needs to be fairly quick. Florian & Co.
+ * suggested a unified ARP and IP routing cache. Done right its
+ * probably a brilliant idea. I'd actually suggest a unified
+ * ARP/IP routing/Socket pointer cache. Volunteers welcome
+ */
+
+struct rtable * ip_rt_route(unsigned long daddr, struct options *opt, unsigned long *src_addr)
+{
+ struct rtable *rt;
+
+ for (rt = rt_base; rt != NULL || early_out ; rt = rt->rt_next)
+ {
+ if (!((rt->rt_dst ^ daddr) & rt->rt_mask))
+ break;
+ /*
+ * broadcast addresses can be special cases..
+ */
+ if (rt->rt_flags & RTF_GATEWAY)
+ continue;
+ if ((rt->rt_dev->flags & IFF_BROADCAST) &&
+ (rt->rt_dev->pa_brdaddr == daddr))
+ break;
+ }
+
+ if(src_addr!=NULL)
+ *src_addr= rt->rt_dev->pa_addr;
+
+ if (daddr == rt->rt_dev->pa_addr) {
+ if ((rt = rt_loopback) == NULL)
+ goto no_route;
+ }
+ rt->rt_use++;
+ return rt;
+no_route:
+ return NULL;
+}
+
+struct rtable * ip_rt_local(unsigned long daddr, struct options *opt, unsigned long *src_addr)
+{
+ struct rtable *rt;
+
+ for (rt = rt_base; rt != NULL || early_out ; rt = rt->rt_next)
+ {
+ /*
+ * No routed addressing.
+ */
+ if (rt->rt_flags&RTF_GATEWAY)
+ continue;
+
+ if (!((rt->rt_dst ^ daddr) & rt->rt_mask))
+ break;
+ /*
+ * broadcast addresses can be special cases..
+ */
+
+ if ((rt->rt_dev->flags & IFF_BROADCAST) &&
+ rt->rt_dev->pa_brdaddr == daddr)
+ break;
+ }
+
+ if(src_addr!=NULL)
+ *src_addr= rt->rt_dev->pa_addr;
+
+ if (daddr == rt->rt_dev->pa_addr) {
+ if ((rt = rt_loopback) == NULL)
+ goto no_route;
+ }
+ rt->rt_use++;
+ return rt;
+no_route:
+ return NULL;
+}
+
+/*
+ * Backwards compatibility
+ */
+
+static int ip_get_old_rtent(struct old_rtentry * src, struct rtentry * rt)
+{
+ int err;
+ struct old_rtentry tmp;
+
+ err=verify_area(VERIFY_READ, src, sizeof(*src));
+ if (err)
+ return err;
+ memcpy_fromfs(&tmp, src, sizeof(*src));
+ memset(rt, 0, sizeof(*rt));
+ rt->rt_dst = tmp.rt_dst;
+ rt->rt_gateway = tmp.rt_gateway;
+ rt->rt_genmask.sa_family = AF_INET;
+ ((struct sockaddr_in *) &rt->rt_genmask)->sin_addr.s_addr = tmp.rt_genmask;
+ rt->rt_flags = tmp.rt_flags;
+ rt->rt_dev = tmp.rt_dev;
+ printk("Warning: obsolete routing request made.\n");
+ return 0;
+}
+
+/*
+ * Handle IP routing ioctl calls. These are used to manipulate the routing tables
+ */
+
+int ip_rt_ioctl(unsigned int cmd, void *arg)
+{
+ int err;
+ struct rtentry rt;
+
+ switch(cmd)
+ {
+ case SIOCADDRTOLD: /* Old style add route */
+ case SIOCDELRTOLD: /* Old style delete route */
+ if (!suser())
+ return -EPERM;
+ err = ip_get_old_rtent((struct old_rtentry *) arg, &rt);
+ if (err)
+ return err;
+ return (cmd == SIOCDELRTOLD) ? rt_kill(&rt) : rt_new(&rt);
+
+ case SIOCADDRT: /* Add a route */
+ case SIOCDELRT: /* Delete a route */
+ if (!suser())
+ return -EPERM;
+ err=verify_area(VERIFY_READ, arg, sizeof(struct rtentry));
+ if (err)
+ return err;
+ memcpy_fromfs(&rt, arg, sizeof(struct rtentry));
+ return (cmd == SIOCDELRT) ? rt_kill(&rt) : rt_new(&rt);
+ }
+
+ return -EINVAL;
+}
diff --git a/net/inet/route.h b/net/inet/route.h
new file mode 100644
index 000000000..a693ffb41
--- /dev/null
+++ b/net/inet/route.h
@@ -0,0 +1,53 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP router.
+ *
+ * Version: @(#)route.h 1.0.4 05/27/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Fixes:
+ * Alan Cox : Reformatted. Added ip_rt_local()
+ * Alan Cox : Support for TCP parameters.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ROUTE_H
+#define _ROUTE_H
+
+
+#include <linux/route.h>
+
+
+/* This is an entry in the IP routing table. */
+struct rtable
+{
+ struct rtable *rt_next;
+ unsigned long rt_dst;
+ unsigned long rt_mask;
+ unsigned long rt_gateway;
+ unsigned char rt_flags;
+ unsigned char rt_metric;
+ short rt_refcnt;
+ unsigned long rt_use;
+ unsigned short rt_mss;
+ unsigned long rt_window;
+ struct device *rt_dev;
+};
+
+
+extern void ip_rt_flush(struct device *dev);
+extern void ip_rt_add(short flags, unsigned long addr, unsigned long mask,
+ unsigned long gw, struct device *dev, unsigned short mss, unsigned long window);
+extern struct rtable *ip_rt_route(unsigned long daddr, struct options *opt, unsigned long *src_addr);
+extern struct rtable *ip_rt_local(unsigned long daddr, struct options *opt, unsigned long *src_addr);
+extern int rt_get_info(char * buffer, char **start, off_t offset, int length);
+extern int ip_rt_ioctl(unsigned int cmd, void *arg);
+
+#endif /* _ROUTE_H */
diff --git a/net/inet/skbuff.c b/net/inet/skbuff.c
new file mode 100644
index 000000000..d5ae2adef
--- /dev/null
+++ b/net/inet/skbuff.c
@@ -0,0 +1,563 @@
+/*
+ * Routines having to do with the 'struct sk_buff' memory handlers.
+ *
+ * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
+ * Florian La Roche <rzsfl@rz.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : Fixed the worst of the load balancer bugs.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Note: There are a load of cli()/sti() pairs protecting the net_memory type
+ * variables. Without them for some reason the ++/-- operators do not come out
+ * atomic. Also with gcc 2.4.5 these counts can come out wrong anyway - use 2.5.8!!
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include <linux/string.h>
+#include "route.h"
+#include "tcp.h"
+#include "udp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+
+
+/*
+ * Resource tracking variables
+ */
+
+volatile unsigned long net_memory = 0;
+volatile unsigned long net_skbcount = 0;
+volatile unsigned long net_locked = 0;
+volatile unsigned long net_allocs = 0;
+volatile unsigned long net_fails = 0;
+volatile unsigned long net_free_locked = 0;
+
+void show_net_buffers(void)
+{
+ printk("Networking buffers in use : %lu\n",net_skbcount);
+ printk("Memory committed to network buffers: %lu\n",net_memory);
+ printk("Network buffers locked by drivers : %lu\n",net_locked);
+ printk("Total network buffer allocations : %lu\n",net_allocs);
+ printk("Total failed network buffer allocs : %lu\n",net_fails);
+ printk("Total free while locked events : %lu\n",net_free_locked);
+}
+
+#if CONFIG_SKB_CHECK
+
+/*
+ * Debugging paranoia. Can go later when this crud stack works
+ */
+
+int skb_check(struct sk_buff *skb, int head, int line, char *file)
+{
+ if (head) {
+ if (skb->magic_debug_cookie != SK_HEAD_SKB) {
+ printk("File: %s Line %d, found a bad skb-head\n",
+ file,line);
+ return -1;
+ }
+ if (!skb->next || !skb->prev) {
+ printk("skb_check: head without next or prev\n");
+ return -1;
+ }
+ if (skb->next->magic_debug_cookie != SK_HEAD_SKB
+ && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
+ printk("File: %s Line %d, bad next head-skb member\n",
+ file,line);
+ return -1;
+ }
+ if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
+ && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
+ printk("File: %s Line %d, bad prev head-skb member\n",
+ file,line);
+ return -1;
+ }
+#if 0
+ {
+ struct sk_buff *skb2 = skb->next;
+ int i = 0;
+ while (skb2 != skb && i < 5) {
+ if (skb_check(skb2, 0, line, file) < 0) {
+ printk("bad queue element in whole queue\n");
+ return -1;
+ }
+ i++;
+ skb2 = skb2->next;
+ }
+ }
+#endif
+ return 0;
+ }
+ if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
+ && skb->next->magic_debug_cookie != SK_GOOD_SKB) {
+ printk("File: %s Line %d, bad next skb member\n",
+ file,line);
+ return -1;
+ }
+ if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
+ && skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
+ printk("File: %s Line %d, bad prev skb member\n",
+ file,line);
+ return -1;
+ }
+
+
+ if(skb->magic_debug_cookie==SK_FREED_SKB)
+ {
+ printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
+ file,line);
+ printk("skb=%p, real size=%ld, claimed size=%ld, free=%d\n",
+ skb,skb->truesize,skb->mem_len,skb->free);
+ return -1;
+ }
+ if(skb->magic_debug_cookie!=SK_GOOD_SKB)
+ {
+ printk("File: %s Line %d, passed a non skb!\n", file,line);
+ printk("skb=%p, real size=%ld, claimed size=%ld, free=%d\n",
+ skb,skb->truesize,skb->mem_len,skb->free);
+ return -1;
+ }
+ if(skb->mem_len!=skb->truesize)
+ {
+ printk("File: %s Line %d, Dubious size setting!\n",file,line);
+ printk("skb=%p, real size=%ld, claimed size=%ld\n",
+ skb,skb->truesize,skb->mem_len);
+ return -1;
+ }
+ /* Guess it might be acceptable then */
+ return 0;
+}
+#endif
+
+
+void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+#if CONFIG_SKB_CHECK
+ list->magic_debug_cookie = SK_HEAD_SKB;
+#endif
+}
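+
+/*
+ *	The queues above are circular doubly linked lists with a dummy head,
+ *	so an empty list is simply one whose head points at itself:
+ *
+ *	struct sk_buff_head q;
+ *	skb_queue_head_init(&q);
+ *	q.next == (struct sk_buff *)&q && q.prev == (struct sk_buff *)&q
+ *
+ *	This is exactly the condition skb_dequeue() below tests before
+ *	returning NULL, and it means the insert routines need no special
+ *	case for an empty list.
+ */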
+
+
+/*
+ * Insert an sk_buff at the start of a list.
+ */
+void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
+{
+ unsigned long flags;
+ struct sk_buff *list = (struct sk_buff *)list_;
+
+ save_flags(flags);
+ cli();
+
+#if CONFIG_SKB_CHECK
+ IS_SKB(newsk);
+ IS_SKB_HEAD(list);
+ if (newsk->next || newsk->prev)
+ printk("Suspicious queue head: sk_buff on list!\n");
+#endif
+
+ newsk->next = list->next;
+ newsk->prev = list;
+
+ newsk->next->prev = newsk;
+ newsk->prev->next = newsk;
+
+ restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
+{
+ unsigned long flags;
+ struct sk_buff *list = (struct sk_buff *)list_;
+
+ save_flags(flags);
+ cli();
+
+#if CONFIG_SKB_CHECK
+ if (newsk->next || newsk->prev)
+ printk("Suspicious queue tail: sk_buff on list!\n");
+ IS_SKB(newsk);
+ IS_SKB_HEAD(list);
+#endif
+
+ newsk->next = list;
+ newsk->prev = list->prev;
+
+ newsk->next->prev = newsk;
+ newsk->prev->next = newsk;
+
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list. This routine is also interrupt safe
+ * so you can grab read and free buffers as another process adds them.
+ */
+
+struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
+{
+ long flags;
+ struct sk_buff *result;
+ struct sk_buff *list = (struct sk_buff *)list_;
+
+ save_flags(flags);
+ cli();
+
+ IS_SKB_HEAD(list);
+
+ result = list->next;
+ if (result == list) {
+ restore_flags(flags);
+ return NULL;
+ }
+
+ result->next->prev = list;
+ list->next = result->next;
+
+ result->next = NULL;
+ result->prev = NULL;
+
+ restore_flags(flags);
+
+ IS_SKB(result);
+ return result;
+}
+
+/*
+ * Insert a packet before another one in a list.
+ */
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+#if CONFIG_SKB_CHECK
+ IS_SKB(old);
+ IS_SKB(newsk);
+
+ if(!old->next || !old->prev)
+ printk("insert before unlisted item!\n");
+ if(newsk->next || newsk->prev)
+ printk("inserted item is already on a list.\n");
+#endif
+
+ save_flags(flags);
+ cli();
+ newsk->next = old;
+ newsk->prev = old->prev;
+ old->prev = newsk;
+ newsk->prev->next = newsk;
+
+ restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+#if CONFIG_SKB_CHECK
+ IS_SKB(old);
+ IS_SKB(newsk);
+
+ if(!old->next || !old->prev)
+ printk("append before unlisted item!\n");
+ if(newsk->next || newsk->prev)
+ printk("append item is already on a list.\n");
+#endif
+
+ save_flags(flags);
+ cli();
+
+ newsk->prev = old;
+ newsk->next = old->next;
+ newsk->next->prev = newsk;
+ old->next = newsk;
+
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_, before the list itself is freed.
+ */
+void skb_unlink(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ IS_SKB(skb);
+
+ if(skb->prev && skb->next)
+ {
+ skb->next->prev = skb->prev;
+ skb->prev->next = skb->next;
+ skb->next = NULL;
+ skb->prev = NULL;
+ }
+#ifdef PARANOID_BUGHUNT_MODE /* This is legal but we sometimes want to watch it */
+ else
+ printk("skb_unlink: not a linked element\n");
+#endif
+ restore_flags(flags);
+}
+
+/*
+ * Free an sk_buff. This still knows about things it should
+ * not need to, such as protocols and sockets.
+ */
+
+void kfree_skb(struct sk_buff *skb, int rw)
+{
+ if (skb == NULL)
+ {
+ printk("kfree_skb: skb = NULL (from %p)\n",
+ __builtin_return_address(0));
+ return;
+ }
+ IS_SKB(skb);
+ if (skb->lock)
+ {
+ skb->free = 3; /* Free when unlocked */
+ net_free_locked++;
+ return;
+ }
+ if (skb->free == 2)
+ printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
+ __builtin_return_address(0));
+ if (skb->next)
+ printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
+ __builtin_return_address(0));
+ if (skb->sk)
+ {
+ if(skb->sk->prot!=NULL)
+ {
+ if (rw)
+ skb->sk->prot->rfree(skb->sk, skb, skb->mem_len);
+ else
+ skb->sk->prot->wfree(skb->sk, skb, skb->mem_len);
+
+ }
+ else
+ {
+ /* Non INET - default wmalloc/rmalloc handler */
+ if (rw)
+ skb->sk->rmem_alloc-=skb->mem_len;
+ else
+ skb->sk->wmem_alloc-=skb->mem_len;
+ if(!skb->sk->dead)
+ skb->sk->write_space(skb->sk);
+ }
+ }
+ else
+ kfree_skbmem(skb, skb->mem_len);
+}
+
+/*
+ * Allocate a new skbuff. We do this ourselves so we can fill in a few 'private'
+ * fields and also do memory statistics to find all the [BEEP] leaks.
+ */
+struct sk_buff *alloc_skb(unsigned int size,int priority)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (intr_count && priority!=GFP_ATOMIC) {
+ static int count = 0;
+ if (++count < 5) {
+ printk("alloc_skb called nonatomically from interrupt %p\n",
+ __builtin_return_address(0));
+ priority = GFP_ATOMIC;
+ }
+ }
+
+ size+=sizeof(struct sk_buff);
+ skb=(struct sk_buff *)kmalloc(size,priority);
+ if (skb == NULL)
+ {
+ net_fails++;
+ return NULL;
+ }
+#ifdef PARANOID_BUGHUNT_MODE
+ if(skb->magic_debug_cookie == SK_GOOD_SKB)
+ printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
+#endif
+
+ net_allocs++;
+
+ skb->free = 2; /* Invalid so we pick up forgetful users */
+ skb->lock = 0;
+ skb->pkt_type = PACKET_HOST; /* Default type */
+ skb->truesize = size;
+ skb->mem_len = size;
+ skb->mem_addr = skb;
+#ifdef CONFIG_SLAVE_BALANCING
+ skb->in_dev_queue = 0;
+#endif
+ skb->fraglist = NULL;
+ skb->prev = skb->next = NULL;
+ skb->link3 = NULL;
+ skb->sk = NULL;
+ skb->localroute=0;
+ skb->stamp.tv_sec=0; /* No idea about time */
+ skb->localroute = 0;
+ save_flags(flags);
+ cli();
+ net_memory += size;
+ net_skbcount++;
+ restore_flags(flags);
+#if CONFIG_SKB_CHECK
+ skb->magic_debug_cookie = SK_GOOD_SKB;
+#endif
+ skb->users = 0;
+ return skb;
+}
+
+/*
+ * Free an skbuff by memory
+ */
+
+void kfree_skbmem(struct sk_buff *skb,unsigned size)
+{
+ unsigned long flags;
+#ifdef CONFIG_SLAVE_BALANCING
+ save_flags(flags);
+ cli();
+ if(skb->in_dev_queue && skb->dev!=NULL)
+ skb->dev->pkt_queue--;
+ restore_flags(flags);
+#endif
+ IS_SKB(skb);
+ if(size!=skb->truesize)
+ printk("kfree_skbmem: size mismatch.\n");
+
+ if(skb->magic_debug_cookie == SK_GOOD_SKB)
+ {
+ save_flags(flags);
+ cli();
+ IS_SKB(skb);
+ skb->magic_debug_cookie = SK_FREED_SKB;
+ kfree_s((void *)skb,size);
+ net_skbcount--;
+ net_memory -= size;
+ restore_flags(flags);
+ }
+ else
+ printk("kfree_skbmem: bad magic cookie\n");
+}
+
+/*
+ * Duplicate an sk_buff. The new one is not owned by a socket or locked
+ * and will be freed on deletion.
+ */
+
+struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
+{
+ struct sk_buff *n;
+ unsigned long offset;
+
+ n=alloc_skb(skb->mem_len-sizeof(struct sk_buff),priority);
+ if(n==NULL)
+ return NULL;
+
+ offset=((char *)n)-((char *)skb);
+
+ memcpy(n->data,skb->data,skb->mem_len-sizeof(struct sk_buff));
+ n->len=skb->len;
+ n->link3=NULL;
+ n->sk=NULL;
+ n->when=skb->when;
+ n->dev=skb->dev;
+ n->h.raw=skb->h.raw+offset;
+ n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
+ n->fraglen=skb->fraglen;
+ n->fraglist=skb->fraglist;
+ n->saddr=skb->saddr;
+ n->daddr=skb->daddr;
+ n->raddr=skb->raddr;
+ n->acked=skb->acked;
+ n->used=skb->used;
+ n->free=1;
+ n->arp=skb->arp;
+ n->tries=0;
+ n->lock=0;
+ n->users=0;
+ n->pkt_type=skb->pkt_type;
+ return n;
+}
+
+
+/*
+ * Skbuff device locking
+ */
+
+void skb_device_lock(struct sk_buff *skb)
+{
+ if(skb->lock)
+ printk("double lock on device queue!\n");
+ else
+ net_locked++;
+ skb->lock++;
+}
+
+void skb_device_unlock(struct sk_buff *skb)
+{
+ if(skb->lock==0)
+ printk("double unlock on device queue!\n");
+ skb->lock--;
+ if(skb->lock==0)
+ net_locked--;
+}
+
+void dev_kfree_skb(struct sk_buff *skb, int mode)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if(skb->lock==1)
+ net_locked--;
+
+ if (!--skb->lock && (skb->free == 1 || skb->free == 3))
+ {
+ restore_flags(flags);
+ kfree_skb(skb,mode);
+ }
+ else
+ restore_flags(flags);
+}
+
+int skb_device_locked(struct sk_buff *skb)
+{
+ return skb->lock? 1 : 0;
+}
+
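The routines above implement sk_buff queues as circular doubly linked lists whose head (struct sk_buff_head) doubles as a sentinel node, with cli()/restore_flags() guarding each manipulation. As a rough usage sketch only (my_backlog, my_enqueue() and my_drain() are invented names, not part of this patch), a protocol keeping a private queue would combine the primitives like this:

    #include <linux/skbuff.h>

    static struct sk_buff_head my_backlog;       /* hypothetical private queue */

    static void my_init(void)
    {
        skb_queue_head_init(&my_backlog);        /* empty list points at itself */
    }

    /* Interrupt context: just park the buffer, the helpers disable irqs. */
    static void my_enqueue(struct sk_buff *skb)
    {
        skb_queue_tail(&my_backlog, skb);
    }

    /* Process context: drain the queue and release the buffers. */
    static void my_drain(void)
    {
        struct sk_buff *skb;
        while ((skb = skb_dequeue(&my_backlog)) != NULL)
            kfree_skb(skb, FREE_READ);           /* reader-side accounting */
    }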
diff --git a/net/inet/snmp.h b/net/inet/snmp.h
new file mode 100644
index 000000000..552292be6
--- /dev/null
+++ b/net/inet/snmp.h
@@ -0,0 +1,107 @@
+/*
+ *
+ * SNMP MIB entries for the IP subsystem.
+ *
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ *
+ * We don't choose to implement SNMP in the kernel (this would
+ * be silly as SNMP is a pain in the backside in places). We do
+ * however need to collect the MIB statistics and export them
+ * out of /proc (eventually)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _SNMP_H
+#define _SNMP_H
+
+/*
+ * We use all unsigned longs. Linux will soon be so reliable that even these
+ * will rapidly get too small 8-). Seriously consider the IpInReceives count
+ * on the 20Gb/s + networks people expect in a few years time!
+ */
+
+struct ip_mib
+{
+ unsigned long IpForwarding;
+ unsigned long IpDefaultTTL;
+ unsigned long IpInReceives;
+ unsigned long IpInHdrErrors;
+ unsigned long IpInAddrErrors;
+ unsigned long IpForwDatagrams;
+ unsigned long IpInUnknownProtos;
+ unsigned long IpInDiscards;
+ unsigned long IpInDelivers;
+ unsigned long IpOutRequests;
+ unsigned long IpOutDiscards;
+ unsigned long IpOutNoRoutes;
+ unsigned long IpReasmTimeout;
+ unsigned long IpReasmReqds;
+ unsigned long IpReasmOKs;
+ unsigned long IpReasmFails;
+ unsigned long IpFragOKs;
+ unsigned long IpFragFails;
+ unsigned long IpFragCreates;
+};
+
+
+struct icmp_mib
+{
+ unsigned long IcmpInMsgs;
+ unsigned long IcmpInErrors;
+ unsigned long IcmpInDestUnreachs;
+ unsigned long IcmpInTimeExcds;
+ unsigned long IcmpInParmProbs;
+ unsigned long IcmpInSrcQuenchs;
+ unsigned long IcmpInRedirects;
+ unsigned long IcmpInEchos;
+ unsigned long IcmpInEchoReps;
+ unsigned long IcmpInTimestamps;
+ unsigned long IcmpInTimestampReps;
+ unsigned long IcmpInAddrMasks;
+ unsigned long IcmpInAddrMaskReps;
+ unsigned long IcmpOutMsgs;
+ unsigned long IcmpOutErrors;
+ unsigned long IcmpOutDestUnreachs;
+ unsigned long IcmpOutTimeExcds;
+ unsigned long IcmpOutParmProbs;
+ unsigned long IcmpOutSrcQuenchs;
+ unsigned long IcmpOutRedirects;
+ unsigned long IcmpOutEchos;
+ unsigned long IcmpOutEchoReps;
+ unsigned long IcmpOutTimestamps;
+ unsigned long IcmpOutTimestampReps;
+ unsigned long IcmpOutAddrMasks;
+ unsigned long IcmpOutAddrMaskReps;
+};
+
+struct tcp_mib
+{
+ unsigned long TcpRtoAlgorithm;
+ unsigned long TcpRtoMin;
+ unsigned long TcpRtoMax;
+ unsigned long TcpMaxConn;
+ unsigned long TcpActiveOpens;
+ unsigned long TcpPassiveOpens;
+ unsigned long TcpAttemptFails;
+ unsigned long TcpEstabResets;
+ unsigned long TcpCurrEstab;
+ unsigned long TcpInSegs;
+ unsigned long TcpOutSegs;
+ unsigned long TcpRetransSegs;
+};
+
+struct udp_mib
+{
+ unsigned long UdpInDatagrams;
+ unsigned long UdpNoPorts;
+ unsigned long UdpInErrors;
+ unsigned long UdpOutDatagrams;
+};
+
+
+#endif
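These MIB structures are plain blocks of counters; the protocol code below simply increments the fields (tcp.c bumps tcp_statistics.TcpOutSegs, for example) and a /proc formatter can print them out later. A minimal sketch of such a formatter, assuming the invented name snmp_dump_tcp() and the 1.1-era /proc/net convention of sprintf()ing into a caller-supplied buffer:

    #include <linux/kernel.h>
    #include "snmp.h"

    /* Sketch only: write a few TCP MIB counters into 'buffer' and return the
     * number of characters produced. */
    static int snmp_dump_tcp(char *buffer, struct tcp_mib *p)
    {
        return sprintf(buffer,
            "Tcp: ActiveOpens %lu PassiveOpens %lu InSegs %lu OutSegs %lu RetransSegs %lu\n",
            p->TcpActiveOpens, p->TcpPassiveOpens,
            p->TcpInSegs, p->TcpOutSegs, p->TcpRetransSegs);
    }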
diff --git a/net/inet/sock.c b/net/inet/sock.c
new file mode 100644
index 000000000..fd5f123bd
--- /dev/null
+++ b/net/inet/sock.c
@@ -0,0 +1,544 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Generic socket support routines. Memory allocators, sk->inuse/release
+ * handler for protocols to use and generic option handler.
+ *
+ *
+ * Version: @(#)sock.c 1.0.17 06/02/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche, <flla@stud.uni-sb.de>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ *
+ * Fixes:
+ * Alan Cox : Numerous verify_area() problems
+ * Alan Cox : Connecting on a connecting socket
+ * now returns an error for tcp.
+ * Alan Cox : sock->protocol is set correctly.
+ * and is not sometimes left as 0.
+ * Alan Cox : connect handles icmp errors on a
+ * connect properly. Unfortunately there
+ * is a restart syscall nasty there. I
+ * can't match BSD without hacking the C
+ * library. Ideas urgently sought!
+ * Alan Cox : Disallow bind() to addresses that are
+ * not ours - especially broadcast ones!!
+ * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
+ * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
+ * instead they leave that for the DESTROY timer.
+ * Alan Cox : Clean up error flag in accept
+ * Alan Cox : TCP ack handling is buggy, the DESTROY timer
+ * was buggy. Put a remove_sock() in the handler
+ * for memory when we hit 0. Also altered the timer
+ * code. The ACK stuff can wait and needs major
+ * TCP layer surgery.
+ * Alan Cox : Fixed TCP ack bug, removed remove sock
+ * and fixed timer/inet_bh race.
+ * Alan Cox : Added zapped flag for TCP
+ * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
+ * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
+ * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
+ * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
+ * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
+ * Rick Sladkey : Relaxed UDP rules for matching packets.
+ * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
+ * Pauline Middelink : identd support
+ * Alan Cox : Fixed connect() taking signals I think.
+ * Alan Cox : SO_LINGER supported
+ * Alan Cox : Error reporting fixes
+ * Anonymous : inet_create tidied up (sk->reuse setting)
+ * Alan Cox : inet sockets don't set sk->type!
+ * Alan Cox : Split socket option code
+ * Alan Cox : Callbacks
+ * Alan Cox : Nagle flag for Charles & Johannes stuff
+ * Alex : Removed restriction on inet fioctl
+ * Alan Cox : Splitting INET from NET core
+ * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
+ * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
+ * Alan Cox : Split IP from generic code
+ * Alan Cox : New kfree_skbmem()
+ *
+ * To Fix:
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include "arp.h"
+#include "rarp.h"
+#include "route.h"
+#include "tcp.h"
+#include "udp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "raw.h"
+#include "icmp.h"
+
+#define min(a,b) ((a)<(b)?(a):(b))
+
+/*
+ * This is meant for all protocols to use and covers goings on
+ * at the socket level. Everything here is generic.
+ */
+
+int sock_setsockopt(struct sock *sk, int level, int optname,
+ char *optval, int optlen)
+{
+ int val;
+ int err;
+ struct linger ling;
+
+ if (optval == NULL)
+ return(-EINVAL);
+
+ err=verify_area(VERIFY_READ, optval, sizeof(int));
+ if(err)
+ return err;
+
+ val = get_fs_long((unsigned long *)optval);
+ switch(optname)
+ {
+ case SO_TYPE:
+ case SO_ERROR:
+ return(-ENOPROTOOPT);
+
+ case SO_DEBUG:
+ sk->debug=val?1:0;
+ return 0;
+ case SO_DONTROUTE:
+ sk->localroute=val?1:0;
+ return 0;
+ case SO_BROADCAST:
+ sk->broadcast=val?1:0;
+ return 0;
+ case SO_SNDBUF:
+ if(val>32767)
+ val=32767;
+ if(val<256)
+ val=256;
+ sk->sndbuf=val;
+ return 0;
+ case SO_LINGER:
+ err=verify_area(VERIFY_READ,optval,sizeof(ling));
+ if(err)
+ return err;
+ memcpy_fromfs(&ling,optval,sizeof(ling));
+ if(ling.l_onoff==0)
+ sk->linger=0;
+ else
+ {
+ sk->lingertime=ling.l_linger;
+ sk->linger=1;
+ }
+ return 0;
+ case SO_RCVBUF:
+ if(val>32767)
+ val=32767;
+ if(val<256)
+ val=256;
+ sk->rcvbuf=val;
+ return(0);
+
+ case SO_REUSEADDR:
+ if (val)
+ sk->reuse = 1;
+ else
+ sk->reuse = 0;
+ return(0);
+
+ case SO_KEEPALIVE:
+ if (val)
+ sk->keepopen = 1;
+ else
+ sk->keepopen = 0;
+ return(0);
+
+ case SO_OOBINLINE:
+ if (val)
+ sk->urginline = 1;
+ else
+ sk->urginline = 0;
+ return(0);
+
+ case SO_NO_CHECK:
+ if (val)
+ sk->no_check = 1;
+ else
+ sk->no_check = 0;
+ return(0);
+
+ case SO_PRIORITY:
+ if (val >= 0 && val < DEV_NUMBUFFS)
+ {
+ sk->priority = val;
+ }
+ else
+ {
+ return(-EINVAL);
+ }
+ return(0);
+
+ default:
+ return(-ENOPROTOOPT);
+ }
+}
+
+
+int sock_getsockopt(struct sock *sk, int level, int optname,
+ char *optval, int *optlen)
+{
+ int val;
+ int err;
+ struct linger ling;
+
+ switch(optname)
+ {
+ case SO_DEBUG:
+ val = sk->debug;
+ break;
+
+ case SO_DONTROUTE:
+ val = sk->localroute;
+ break;
+
+ case SO_BROADCAST:
+ val= sk->broadcast;
+ break;
+
+ case SO_LINGER:
+ err=verify_area(VERIFY_WRITE,optval,sizeof(ling));
+ if(err)
+ return err;
+ err=verify_area(VERIFY_WRITE,optlen,sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(sizeof(ling),(unsigned long *)optlen);
+ ling.l_onoff=sk->linger;
+ ling.l_linger=sk->lingertime;
+ memcpy_tofs(optval,&ling,sizeof(ling));
+ return 0;
+
+ case SO_SNDBUF:
+ val=sk->sndbuf;
+ break;
+
+ case SO_RCVBUF:
+ val =sk->rcvbuf;
+ break;
+
+ case SO_REUSEADDR:
+ val = sk->reuse;
+ break;
+
+ case SO_KEEPALIVE:
+ val = sk->keepopen;
+ break;
+
+ case SO_TYPE:
+#if 0
+ if (sk->prot == &tcp_prot)
+ val = SOCK_STREAM;
+ else
+ val = SOCK_DGRAM;
+#endif
+ val = sk->type;
+ break;
+
+ case SO_ERROR:
+ val = sk->err;
+ sk->err = 0;
+ break;
+
+ case SO_OOBINLINE:
+ val = sk->urginline;
+ break;
+
+ case SO_NO_CHECK:
+ val = sk->no_check;
+ break;
+
+ case SO_PRIORITY:
+ val = sk->priority;
+ break;
+
+ default:
+ return(-ENOPROTOOPT);
+ }
+ err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(sizeof(int),(unsigned long *) optlen);
+
+ err=verify_area(VERIFY_WRITE, optval, sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(val,(unsigned long *)optval);
+
+ return(0);
+}
+
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
+{
+ if (sk)
+ {
+ if (sk->wmem_alloc + size < sk->sndbuf || force)
+ {
+ struct sk_buff * c = alloc_skb(size, priority);
+ if (c)
+ {
+ cli();
+ sk->wmem_alloc+= c->mem_len;
+ sti();
+ }
+ return c;
+ }
+ return(NULL);
+ }
+ return(alloc_skb(size, priority));
+}
+
+
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
+{
+ if (sk)
+ {
+ if (sk->rmem_alloc + size < sk->rcvbuf || force)
+ {
+ struct sk_buff *c = alloc_skb(size, priority);
+ if (c)
+ {
+ cli();
+ sk->rmem_alloc += c->mem_len;
+ sti();
+ }
+ return(c);
+ }
+ return(NULL);
+ }
+ return(alloc_skb(size, priority));
+}
+
+
+unsigned long sock_rspace(struct sock *sk)
+{
+ int amt;
+
+ if (sk != NULL)
+ {
+ if (sk->rmem_alloc >= sk->rcvbuf-2*MIN_WINDOW)
+ return(0);
+ amt = min((sk->rcvbuf-sk->rmem_alloc)/2-MIN_WINDOW, MAX_WINDOW);
+ if (amt < 0)
+ return(0);
+ return(amt);
+ }
+ return(0);
+}
+
+
+unsigned long sock_wspace(struct sock *sk)
+{
+ if (sk != NULL)
+ {
+ if (sk->shutdown & SEND_SHUTDOWN)
+ return(0);
+ if (sk->wmem_alloc >= sk->sndbuf)
+ return(0);
+ return(sk->sndbuf-sk->wmem_alloc );
+ }
+ return(0);
+}
+
+
+void sock_wfree(struct sock *sk, struct sk_buff *skb, unsigned long size)
+{
+ IS_SKB(skb);
+ kfree_skbmem(skb, size);
+ if (sk)
+ {
+ sk->wmem_alloc -= size;
+ /* In case it might be waiting for more memory. */
+ if (!sk->dead)
+ sk->write_space(sk);
+ return;
+ }
+}
+
+
+void sock_rfree(struct sock *sk, struct sk_buff *skb, unsigned long size)
+{
+ IS_SKB(skb);
+ kfree_skbmem(skb, size);
+ if (sk)
+ {
+ sk->rmem_alloc -= size;
+ }
+}
+
+/*
+ * Generic send/receive buffer handlers
+ */
+
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode)
+{
+ struct sk_buff *skb;
+ int err;
+
+ sk->inuse=1;
+
+ do
+ {
+ if(sk->err!=0)
+ {
+ cli();
+ err= -sk->err;
+ sk->err=0;
+ sti();
+ *errcode=err;
+ return NULL;
+ }
+
+ if(sk->shutdown&SEND_SHUTDOWN)
+ {
+ *errcode=-EPIPE;
+ return NULL;
+ }
+
+ skb = sock_wmalloc(sk, size, 0, GFP_KERNEL);
+
+ if(skb==NULL)
+ {
+ unsigned long tmp;
+ if(noblock)
+ {
+ *errcode=-EAGAIN;
+ return NULL;
+ }
+ if(sk->shutdown&SEND_SHUTDOWN)
+ {
+ *errcode=-EPIPE;
+ return NULL;
+ }
+ tmp = sk->wmem_alloc;
+ cli();
+ if(sk->shutdown&SEND_SHUTDOWN)
+ {
+ sti();
+ *errcode=-EPIPE;
+ return NULL;
+ }
+
+ if( tmp <= sk->wmem_alloc)
+ {
+ interruptible_sleep_on(sk->sleep);
+ if (current->signal & ~current->blocked)
+ {
+ sti();
+ *errcode = -ERESTARTSYS;
+ return NULL;
+ }
+ }
+ sti();
+ }
+ }
+ while(skb==NULL);
+
+ return skb;
+}
+
+/*
+ * Queue a received datagram if it will fit. Stream and sequenced protocols
+ * can't normally use this as they need to fit buffers in and play with them.
+ */
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if(sk->rmem_alloc + skb->mem_len >= sk->rcvbuf)
+ return -ENOMEM;
+ sk->rmem_alloc+=skb->mem_len;
+ skb->sk=sk;
+ skb_queue_tail(&sk->receive_queue,skb);
+ if(!sk->dead)
+ sk->data_ready(sk,skb->len);
+ return 0;
+}
+
+void release_sock(struct sock *sk)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (!sk->prot)
+ return;
+ /*
+ * Make the backlog atomic. If we don't do this there is a tiny
+ * window where a packet may arrive between the sk->blog being
+ * tested and then set with sk->inuse still 0 causing an extra
+ * unwanted re-entry into release_sock().
+ */
+
+ save_flags(flags);
+ cli();
+ if (sk->blog)
+ {
+ restore_flags(flags);
+ return;
+ }
+ sk->blog=1;
+ sk->inuse = 1;
+ restore_flags(flags);
+#ifdef CONFIG_INET
+ /* See if we have any packets built up. */
+ while((skb = skb_dequeue(&sk->back_log)) != NULL)
+ {
+ sk->blog = 1;
+ if (sk->prot->rcv)
+ sk->prot->rcv(skb, skb->dev, sk->opt,
+ skb->saddr, skb->len, skb->daddr, 1,
+ /* Only used for/by raw sockets. */
+ (struct inet_protocol *)sk->pair);
+ }
+#endif
+ sk->blog = 0;
+ sk->inuse = 0;
+#ifdef CONFIG_INET
+ if (sk->dead && sk->state == TCP_CLOSE)
+ {
+ /* Should be about 2 rtt's */
+ reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME));
+ }
+#endif
+}
+
+
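sock_queue_rcv_skb() above gives datagram protocols a single call that handles the receive-buffer accounting, queueing and data_ready() wakeup, while release_sock() flushes whatever accumulated on back_log while sk->inuse was held. A rough sketch of a receive handler built on these helpers (my_rcv() is an invented name; the drop path mirrors what a UDP-style caller has to do):

    /* Sketch only, not part of this patch; assumes sock.h and skbuff.h. */
    static int my_rcv(struct sock *sk, struct sk_buff *skb)
    {
        sk->inuse = 1;                           /* take the socket lock */
        if (sock_queue_rcv_skb(sk, skb) < 0)
        {
            /* Receive buffer full: make sure no socket gets charged,
               then free the buffer. */
            skb->sk = NULL;
            kfree_skb(skb, FREE_READ);
        }
        release_sock(sk);                        /* run any back_log packets */
        return 0;
    }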
diff --git a/net/inet/sock.h b/net/inet/sock.h
new file mode 100644
index 000000000..07b036fd5
--- /dev/null
+++ b/net/inet/sock.h
@@ -0,0 +1,287 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the AF_INET socket handler.
+ *
+ * Version: @(#)sock.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Florian La Roche <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : Volatiles in skbuff pointers. See
+ * skbuff comments. May be overdone,
+ * better to prove they can be removed
+ * than the reverse.
+ * Alan Cox : Added a zapped field for tcp to note
+ * a socket is reset and must stay shut up
+ * Alan Cox : New fields for options
+ * Pauline Middelink : identd support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _SOCK_H
+#define _SOCK_H
+
+#include <linux/timer.h>
+#include <linux/ip.h> /* struct options */
+#include <linux/tcp.h> /* struct tcphdr */
+
+#include <linux/skbuff.h> /* struct sk_buff */
+#include "protocol.h" /* struct inet_protocol */
+#ifdef CONFIG_AX25
+#include "ax25.h"
+#endif
+#ifdef CONFIG_IPX
+#include "ipx.h"
+#endif
+
+#define SOCK_ARRAY_SIZE 64
+
+
+/*
+ * This structure really needs to be cleaned up.
+ * Most of it is for TCP, and not used by any of
+ * the other protocols.
+ */
+struct sock {
+ struct options *opt;
+ volatile unsigned long wmem_alloc;
+ volatile unsigned long rmem_alloc;
+ unsigned long write_seq;
+ unsigned long sent_seq;
+ unsigned long acked_seq;
+ unsigned long copied_seq;
+ unsigned long rcv_ack_seq;
+ unsigned long window_seq;
+ unsigned long fin_seq;
+ unsigned long urg_seq;
+ unsigned long urg_data;
+
+ /*
+ * Not all are volatile, but some are, so we
+ * might as well say they all are.
+ */
+ volatile char inuse,
+ dead,
+ urginline,
+ intr,
+ blog,
+ done,
+ reuse,
+ keepopen,
+ linger,
+ delay_acks,
+ destroy,
+ ack_timed,
+ no_check,
+ zapped, /* In ax25 & ipx means not linked */
+ broadcast,
+ nonagle;
+ unsigned long lingertime;
+ int proc;
+ struct sock *next;
+ struct sock *pair;
+ struct sk_buff * volatile send_head;
+ struct sk_buff * volatile send_tail;
+ struct sk_buff_head back_log;
+ struct sk_buff *partial;
+ struct timer_list partial_timer;
+ long retransmits;
+ struct sk_buff_head write_queue,
+ receive_queue;
+ struct proto *prot;
+ struct wait_queue **sleep;
+ unsigned long daddr;
+ unsigned long saddr;
+ unsigned short max_unacked;
+ unsigned short window;
+ unsigned short bytes_rcv;
+/* mss is min(mtu, max_window) */
+ unsigned short mtu; /* mss negotiated in the syn's */
+ volatile unsigned short mss; /* current eff. mss - can change */
+ volatile unsigned short user_mss; /* mss requested by user in ioctl */
+ volatile unsigned short max_window;
+ unsigned long window_clamp;
+ unsigned short num;
+ volatile unsigned short cong_window;
+ volatile unsigned short cong_count;
+ volatile unsigned short ssthresh;
+ volatile unsigned short packets_out;
+ volatile unsigned short shutdown;
+ volatile unsigned long rtt;
+ volatile unsigned long mdev;
+ volatile unsigned long rto;
+/* currently backoff isn't used, but I'm maintaining it in case
+ * we want to go back to a backoff formula that needs it
+ */
+ volatile unsigned short backoff;
+ volatile short err;
+ unsigned char protocol;
+ volatile unsigned char state;
+ volatile unsigned char ack_backlog;
+ unsigned char max_ack_backlog;
+ unsigned char priority;
+ unsigned char debug;
+ unsigned short rcvbuf;
+ unsigned short sndbuf;
+ unsigned short type;
+ unsigned char localroute; /* Route locally only */
+#ifdef CONFIG_IPX
+ ipx_address ipx_source_addr,ipx_dest_addr;
+ unsigned short ipx_type;
+#endif
+#ifdef CONFIG_AX25
+/* Really we want to add a per protocol private area */
+ ax25_address ax25_source_addr,ax25_dest_addr;
+ struct sk_buff *volatile ax25_retxq[8];
+ char ax25_state,ax25_vs,ax25_vr,ax25_lastrxnr,ax25_lasttxnr;
+ char ax25_condition;
+ char ax25_retxcnt;
+ char ax25_xx;
+ char ax25_retxqi;
+ char ax25_rrtimer;
+ char ax25_timer;
+ unsigned char ax25_n2;
+ unsigned short ax25_t1,ax25_t2,ax25_t3;
+ ax25_digi *ax25_digipeat;
+#endif
+/* IP 'private area' or will be eventually */
+ int ip_ttl; /* TTL setting */
+ int ip_tos; /* TOS */
+ struct tcphdr dummy_th;
+
+ /* This part is used for the timeout functions (timer.c). */
+ int timeout; /* What are we waiting for? */
+ struct timer_list timer;
+ struct timeval stamp;
+
+ /* identd */
+ struct socket *socket;
+
+ /* Callbacks */
+ void (*state_change)(struct sock *sk);
+ void (*data_ready)(struct sock *sk,int bytes);
+ void (*write_space)(struct sock *sk);
+ void (*error_report)(struct sock *sk);
+
+};
+
+struct proto {
+ struct sk_buff * (*wmalloc)(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+ struct sk_buff * (*rmalloc)(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+ void (*wfree)(struct sock *sk, struct sk_buff *skb,
+ unsigned long size);
+ void (*rfree)(struct sock *sk, struct sk_buff *skb,
+ unsigned long size);
+ unsigned long (*rspace)(struct sock *sk);
+ unsigned long (*wspace)(struct sock *sk);
+ void (*close)(struct sock *sk, int timeout);
+ int (*read)(struct sock *sk, unsigned char *to,
+ int len, int nonblock, unsigned flags);
+ int (*write)(struct sock *sk, unsigned char *to,
+ int len, int nonblock, unsigned flags);
+ int (*sendto)(struct sock *sk,
+ unsigned char *from, int len, int noblock,
+ unsigned flags, struct sockaddr_in *usin,
+ int addr_len);
+ int (*recvfrom)(struct sock *sk,
+ unsigned char *from, int len, int noblock,
+ unsigned flags, struct sockaddr_in *usin,
+ int *addr_len);
+ int (*build_header)(struct sk_buff *skb,
+ unsigned long saddr,
+ unsigned long daddr,
+ struct device **dev, int type,
+ struct options *opt, int len, int tos, int ttl);
+ int (*connect)(struct sock *sk,
+ struct sockaddr_in *usin, int addr_len);
+ struct sock * (*accept) (struct sock *sk, int flags);
+ void (*queue_xmit)(struct sock *sk,
+ struct device *dev, struct sk_buff *skb,
+ int free);
+ void (*retransmit)(struct sock *sk, int all);
+ void (*write_wakeup)(struct sock *sk);
+ void (*read_wakeup)(struct sock *sk);
+ int (*rcv)(struct sk_buff *buff, struct device *dev,
+ struct options *opt, unsigned long daddr,
+ unsigned short len, unsigned long saddr,
+ int redo, struct inet_protocol *protocol);
+ int (*select)(struct sock *sk, int which,
+ select_table *wait);
+ int (*ioctl)(struct sock *sk, int cmd,
+ unsigned long arg);
+ int (*init)(struct sock *sk);
+ void (*shutdown)(struct sock *sk, int how);
+ int (*setsockopt)(struct sock *sk, int level, int optname,
+ char *optval, int optlen);
+ int (*getsockopt)(struct sock *sk, int level, int optname,
+ char *optval, int *option);
+ unsigned short max_header;
+ unsigned long retransmits;
+ struct sock * sock_array[SOCK_ARRAY_SIZE];
+ char name[80];
+};
+
+#define TIME_WRITE 1
+#define TIME_CLOSE 2
+#define TIME_KEEPOPEN 3
+#define TIME_DESTROY 4
+#define TIME_DONE 5 /* used to absorb those last few packets */
+#define TIME_PROBE0 6
+#define SOCK_DESTROY_TIME 1000 /* about 10 seconds */
+
+#define PROT_SOCK 1024 /* Sockets 0-1023 can't be bound to unless you are superuser */
+
+#define SHUTDOWN_MASK 3
+#define RCV_SHUTDOWN 1
+#define SEND_SHUTDOWN 2
+
+
+extern void destroy_sock(struct sock *sk);
+extern unsigned short get_new_socknum(struct proto *, unsigned short);
+extern void put_sock(unsigned short, struct sock *);
+extern void release_sock(struct sock *sk);
+extern struct sock *get_sock(struct proto *, unsigned short,
+ unsigned long, unsigned short,
+ unsigned long);
+extern void print_sk(struct sock *);
+extern struct sk_buff *sock_wmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern struct sk_buff *sock_rmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern void sock_wfree(struct sock *sk, struct sk_buff *skb,
+ unsigned long size);
+extern void sock_rfree(struct sock *sk, struct sk_buff *skb,
+ unsigned long size);
+extern unsigned long sock_rspace(struct sock *sk);
+extern unsigned long sock_wspace(struct sock *sk);
+
+extern int sock_setsockopt(struct sock *sk,int level,int op,char *optval,int optlen);
+
+extern int sock_getsockopt(struct sock *sk,int level,int op,char *optval,int *optlen);
+extern struct sk_buff *sock_alloc_send_skb(struct sock *skb, unsigned long size, int noblock, int *errcode);
+extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+/* declarations from timer.c */
+extern struct sock *timer_base;
+
+void delete_timer (struct sock *);
+void reset_timer (struct sock *, int, unsigned long);
+void net_timer (unsigned long);
+
+
+#endif /* _SOCK_H */
diff --git a/net/inet/tcp.c b/net/inet/tcp.c
new file mode 100644
index 000000000..a2bbbe861
--- /dev/null
+++ b/net/inet/tcp.c
@@ -0,0 +1,4582 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Implementation of the Transmission Control Protocol(TCP).
+ *
+ * Version: @(#)tcp.c 1.0.16 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Florian La Roche, <flla@stud.uni-sb.de>
+ * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
+ * Linus Torvalds, <torvalds@cs.helsinki.fi>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Matthew Dillon, <dillon@apollo.west.oic.com>
+ * Arnt Gulbrandsen, <agulbra@no.unit.nvg>
+ *
+ * Fixes:
+ * Alan Cox : Numerous verify_area() calls
+ * Alan Cox : Set the ACK bit on a reset
+ * Alan Cox : Stopped it crashing if it closed while sk->inuse=1
+ * and was trying to connect (tcp_err()).
+ * Alan Cox : All icmp error handling was broken
+ * pointers passed where wrong and the
+ * socket was looked up backwards. Nobody
+ * tested any icmp error code obviously.
+ * Alan Cox : tcp_err() now handled properly. It wakes people
+ * on errors. select behaves and the icmp error race
+ * has gone by moving it into sock.c
+ * Alan Cox : tcp_reset() fixed to work for everything not just
+ * packets for unknown sockets.
+ * Alan Cox : tcp option processing.
+ * Alan Cox : Reset tweaked (still not 100%) [Had syn rule wrong]
+ * Herp Rosmanith : More reset fixes
+ * Alan Cox : No longer acks invalid rst frames. Acking
+ * any kind of RST is right out.
+ * Alan Cox : Sets an ignore me flag on an rst receive
+ * otherwise odd bits of prattle escape still
+ * Alan Cox : Fixed another acking RST frame bug. Should stop
+ * LAN workplace lockups.
+ * Alan Cox : Some tidyups using the new skb list facilities
+ * Alan Cox : sk->keepopen now seems to work
+ * Alan Cox : Pulls options out correctly on accepts
+ * Alan Cox : Fixed assorted sk->rqueue->next errors
+ * Alan Cox : PSH doesn't end a TCP read. Switched a bit to skb ops.
+ * Alan Cox : Tidied tcp_data to avoid a potential nasty.
+ * Alan Cox : Added some better commenting, as the tcp is hard to follow
+ * Alan Cox : Removed incorrect check for 20 * psh
+ * Michael O'Reilly : ack < copied bug fix.
+ * Johannes Stille : Misc tcp fixes (not all in yet).
+ * Alan Cox : FIN with no memory -> CRASH
+ * Alan Cox : Added socket option proto entries. Also added awareness of them to accept.
+ * Alan Cox : Added TCP options (SOL_TCP)
+ * Alan Cox : Switched wakeup calls to callbacks, so the kernel can layer network sockets.
+ * Alan Cox : Use ip_tos/ip_ttl settings.
+ * Alan Cox : Handle FIN (more) properly (we hope).
+ * Alan Cox : RST frames sent on unsynchronised state ack error/
+ * Alan Cox : Put in missing check for SYN bit.
+ * Alan Cox : Added tcp_select_window() aka NET2E
+ * window non shrink trick.
+ * Alan Cox : Added a couple of small NET2E timer fixes
+ * Charles Hedrick : TCP fixes
+ * Toomas Tamm : TCP window fixes
+ * Alan Cox : Small URG fix to rlogin ^C ack fight
+ * Charles Hedrick : Rewrote most of it to actually work
+ * Linus : Rewrote tcp_read() and URG handling
+ * completely
+ * Gerhard Koerting: Fixed some missing timer handling
+ * Matthew Dillon : Reworked TCP machine states as per RFC
+ * Gerhard Koerting: PC/TCP workarounds
+ * Adam Caldwell : Assorted timer/timing errors
+ * Matthew Dillon : Fixed another RST bug
+ * Alan Cox : Move to kernel side addressing changes.
+ * Alan Cox : Beginning work on TCP fastpathing (not yet usable)
+ * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
+ * Alan Cox : TCP fast path debugging
+ * Alan Cox : Window clamping
+ * Michael Riepe : Bug in tcp_check()
+ * Matt Dillon : More TCP improvements and RST bug fixes
+ * Matt Dillon : Yet more small nasties removed from the TCP code
+ * (Be very nice to this man if tcp finally works 100%) 8)
+ * Alan Cox : BSD accept semantics.
+ * Alan Cox : Reset on closedown bug.
+ * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
+ * Michael Pall : Handle select() after URG properly in all cases.
+ * Michael Pall : Undo the last fix in tcp_read_urg() (multi URG PUSH broke rlogin).
+ * Michael Pall : Fix the multi URG PUSH problem in tcp_readable(), select() after URG works now.
+ * Michael Pall : recv(...,MSG_OOB) never blocks in the BSD api.
+ * Alan Cox : Changed the semantics of sk->socket to
+ * fix a race and a signal problem with
+ * accept() and async I/O.
+ * Alan Cox : Relaxed the rules on tcp_sendto().
+ * Yury Shevchuk : Really fixed accept() blocking problem.
+ * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
+ * clients/servers which listen in on
+ * fixed ports.
+ * Alan Cox : Cleaned the above up and shrank it to
+ * a sensible code size.
+ * Alan Cox : Self connect lockup fix.
+ * Alan Cox : No connect to multicast.
+ * Ross Biro : Close unaccepted children on master
+ * socket close.
+ * Alan Cox : Reset tracing code.
+ * Alan Cox : Spurious resets on shutdown.
+ *
+ *
+ * To Fix:
+ * Fast path the code. Two things here - fix the window calculation
+ * so it doesn't iterate over the queue, also spot packets with no funny
+ * options arriving in order and process directly.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or(at your option) any later version.
+ *
+ * Description of States:
+ *
+ * TCP_SYN_SENT sent a connection request, waiting for ack
+ *
+ * TCP_SYN_RECV received a connection request, sent ack,
+ * waiting for final ack in three-way handshake.
+ *
+ * TCP_ESTABLISHED connection established
+ *
+ * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
+ * transmission of remaining buffered data
+ *
+ * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
+ * to shutdown
+ *
+ * TCP_CLOSING both sides have shutdown but we still have
+ * data we have to finish sending
+ *
+ * TCP_TIME_WAIT timeout to catch resent junk before entering
+ * closed, can only be entered from FIN_WAIT2
+ * or CLOSING. Required because the other end
+ * may not have gotten our last ACK causing it
+ * to retransmit the data packet (which we ignore)
+ *
+ * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
+ * us to finish writing our data and to shutdown
+ * (we have to close() to move on to LAST_ACK)
+ *
+ * TCP_LAST_ACK our side has shutdown after remote has
+ * shutdown. There may still be data in our
+ * buffer that we have to finish sending
+ *
+ * TCP_CLOSE socket is finished
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/termios.h>
+#include <linux/in.h>
+#include <linux/fcntl.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "snmp.h"
+#include "ip.h"
+#include "protocol.h"
+#include "icmp.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "route.h"
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <linux/mm.h>
+
+#undef TCP_FASTPATH
+
+#define SEQ_TICK 3
+unsigned long seq_offset;
+struct tcp_mib tcp_statistics;
+
+static void tcp_close(struct sock *sk, int timeout);
+
+#ifdef TCP_FASTPATH
+unsigned long tcp_rx_miss=0, tcp_rx_hit1=0, tcp_rx_hit2=0;
+#endif
+
+
+static __inline__ int min(unsigned int a, unsigned int b)
+{
+ if (a < b)
+ return(a);
+ return(b);
+}
+
+#undef STATE_TRACE
+
+static __inline__ void tcp_set_state(struct sock *sk, int state)
+{
+ if(sk->state==TCP_ESTABLISHED)
+ tcp_statistics.TcpCurrEstab--;
+#ifdef STATE_TRACE
+ if(sk->debug)
+ printk("TCP sk=%s, State %d -> %d\n",sk, sk->state,state);
+#endif
+ sk->state=state;
+ if(state==TCP_ESTABLISHED)
+ tcp_statistics.TcpCurrEstab++;
+}
+
+/* This routine picks a TCP window for a socket based on
+ the following constraints
+
+ 1. The window can never be shrunk once it is offered (RFC 793)
+ 2. We limit memory per socket
+
+ For now we use NET2E3's heuristic of offering half the memory
+ we have handy. All is not as bad as this seems however because
+ of two things. Firstly we will bin packets even within the window
+ in order to get the data we are waiting for into the memory limit.
+ Secondly we bin common duplicate forms at receive time
+
+ Better heuristics welcome
+*/
+
+int tcp_select_window(struct sock *sk)
+{
+ int new_window = sk->prot->rspace(sk);
+
+ if(sk->window_clamp)
+ new_window=min(sk->window_clamp,new_window);
+/*
+ * two things are going on here. First, we don't ever offer a
+ * window less than min(sk->mss, MAX_WINDOW/2). This is the
+ * receiver side of SWS as specified in RFC1122.
+ * Second, we always give them at least the window they
+ * had before, in order to avoid retracting window. This
+ * is technically allowed, but RFC1122 advises against it and
+ * in practice it causes trouble.
+ */
+ if (new_window < min(sk->mss, MAX_WINDOW/2) || new_window < sk->window)
+ return(sk->window);
+ return(new_window);
+}
+
+/*
+ * Find someone to 'accept'. Must be called with
+ * sk->inuse=1 or cli()
+ */
+
+static struct sk_buff *tcp_find_established(struct sock *s)
+{
+ struct sk_buff *p=skb_peek(&s->receive_queue);
+ if(p==NULL)
+ return NULL;
+ do
+ {
+ if(p->sk->state == TCP_ESTABLISHED || p->sk->state >= TCP_FIN_WAIT1)
+ return p;
+ p=p->next;
+ }
+ while(p!=skb_peek(&s->receive_queue));
+ return NULL;
+}
+
+
+/*
+ * This routine closes sockets which have been at least partially
+ * opened, but not yet accepted. Currently it is only called by
+ * tcp_close, and timeout mirrors the value there.
+ */
+
+static void tcp_close_pending (struct sock *sk, int timeout)
+{
+ unsigned long flags;
+ struct sk_buff *p, *old_p;
+
+ save_flags(flags);
+ cli();
+ p=skb_peek(&sk->receive_queue);
+
+ if(p==NULL)
+ {
+ restore_flags(flags);
+ return;
+ }
+
+ do
+ {
+ tcp_close (p->sk, timeout);
+ skb_unlink (p);
+ old_p = p;
+ p=p->next;
+ kfree_skb(old_p, FREE_READ);
+ }
+ while(p!=skb_peek(&sk->receive_queue));
+
+ restore_flags(flags);
+ return;
+}
+
+static struct sk_buff *tcp_dequeue_established(struct sock *s)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ skb=tcp_find_established(s);
+ if(skb!=NULL)
+ skb_unlink(skb); /* Take it off the queue */
+ restore_flags(flags);
+ return skb;
+}
+
+
+/*
+ * Enter the time wait state.
+ */
+
+static void tcp_time_wait(struct sock *sk)
+{
+ tcp_set_state(sk,TCP_TIME_WAIT);
+ sk->shutdown = SHUTDOWN_MASK;
+ if (!sk->dead)
+ sk->state_change(sk);
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+}
+
+/*
+ * A timer event has triggered a tcp retransmit timeout. The
+ * socket xmit queue is ready and set up to send. Because
+ * the ack receive code keeps the queue straight we do
+ * nothing clever here.
+ */
+
+static void tcp_retransmit(struct sock *sk, int all)
+{
+ if (all)
+ {
+ ip_retransmit(sk, all);
+ return;
+ }
+
+ sk->ssthresh = sk->cong_window >> 1; /* remember window where we lost */
+ /* sk->ssthresh in theory can be zero. I guess that's OK */
+ sk->cong_count = 0;
+
+ sk->cong_window = 1;
+
+ /* Do the actual retransmit. */
+ ip_retransmit(sk, all);
+}
+
+
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition. If err < 0 then the socket should
+ * be closed and the error returned to the user. If err > 0
+ * it's just the icmp type << 8 | icmp code. After adjustment
+ * header points to the first 8 bytes of the tcp header. We need
+ * to find the appropriate port.
+ */
+
+void tcp_err(int err, unsigned char *header, unsigned long daddr,
+ unsigned long saddr, struct inet_protocol *protocol)
+{
+ struct tcphdr *th;
+ struct sock *sk;
+ struct iphdr *iph=(struct iphdr *)header;
+
+ header+=4*iph->ihl;
+
+
+ th =(struct tcphdr *)header;
+ sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
+
+ if (sk == NULL)
+ return;
+
+ if(err<0)
+ {
+ sk->err = -err;
+ sk->error_report(sk);
+ return;
+ }
+
+ if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
+ {
+ /*
+ * FIXME:
+ * For now we will just trigger a linear backoff.
+ * The slow start code should cause a real backoff here.
+ */
+ if (sk->cong_window > 4)
+ sk->cong_window--;
+ return;
+ }
+
+/* sk->err = icmp_err_convert[err & 0xff].errno; -- moved as TCP should hide non fatals internally (and does) */
+
+ /*
+ * If we've already connected we will keep trying
+ * until we time out, or the user gives up.
+ */
+
+ if (icmp_err_convert[err & 0xff].fatal || sk->state == TCP_SYN_SENT)
+ {
+ if (sk->state == TCP_SYN_SENT)
+ {
+ tcp_statistics.TcpAttemptFails++;
+ tcp_set_state(sk,TCP_CLOSE);
+ sk->error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
+ }
+ sk->err = icmp_err_convert[err & 0xff].errno;
+ }
+ return;
+}
+
+
+/*
+ * Walk down the receive queue counting readable data until we hit the end or we find a gap
+ * in the received data queue (ie a frame missing that needs sending to us)
+ */
+
+static int tcp_readable(struct sock *sk)
+{
+ unsigned long counted;
+ unsigned long amount;
+ struct sk_buff *skb;
+ int sum;
+ unsigned long flags;
+
+ if(sk && sk->debug)
+ printk("tcp_readable: %p - ",sk);
+
+ save_flags(flags);
+ cli();
+ if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL)
+ {
+ restore_flags(flags);
+ if(sk && sk->debug)
+ printk("empty\n");
+ return(0);
+ }
+
+ counted = sk->copied_seq+1; /* Where we are at the moment */
+ amount = 0;
+
+ /* Do until a push or until we are out of data. */
+ do
+ {
+ if (before(counted, skb->h.th->seq)) /* Found a hole so stops here */
+ break;
+ sum = skb->len -(counted - skb->h.th->seq); /* Length - header but start from where we are up to (avoid overlaps) */
+ if (skb->h.th->syn)
+ sum++;
+ if (sum > 0)
+ { /* Add it up, move on */
+ amount += sum;
+ if (skb->h.th->syn)
+ amount--;
+ counted += sum;
+ }
+ /*
+ * Don't count urg data ... but do it in the right place!
+ * Consider: "old_data (ptr is here) URG PUSH data"
+ * The old code would stop at the first push because
+ * it counted the urg (amount==1) and then does amount--
+ * *after* the loop. This means tcp_readable() always
+ * returned zero if any URG PUSH was in the queue, even
+ * though there was normal data available. If we subtract
+ * the urg data right here, we even get it to work for more
+ * than one URG PUSH skb without normal data.
+ * This means that select() finally works now with urg data
+ * in the queue. Note that rlogin was never affected
+ * because it doesn't use select(); it uses two processes
+ * and a blocking read(). And the queue scan in tcp_read()
+ * was correct. Mike <pall@rz.uni-karlsruhe.de>
+ */
+ if (skb->h.th->urg)
+ amount--; /* don't count urg data */
+ if (amount && skb->h.th->psh) break;
+ skb = skb->next;
+ }
+ while(skb != (struct sk_buff *)&sk->receive_queue);
+
+ restore_flags(flags);
+ if(sk->debug)
+ printk("got %lu bytes.\n",amount);
+ return(amount);
+}
+
+
+/*
+ * Wait for a TCP event. Note the oddity with SEL_IN and reading. The
+ * listening socket has a receive queue of sockets to accept.
+ */
+
+static int tcp_select(struct sock *sk, int sel_type, select_table *wait)
+{
+ sk->inuse = 1;
+
+ switch(sel_type)
+ {
+ case SEL_IN:
+ select_wait(sk->sleep, wait);
+ if (skb_peek(&sk->receive_queue) != NULL)
+ {
+ if ((sk->state == TCP_LISTEN && tcp_find_established(sk)) || tcp_readable(sk))
+ {
+ release_sock(sk);
+ return(1);
+ }
+ }
+ if (sk->err != 0) /* Receiver error */
+ {
+ release_sock(sk);
+ return(1);
+ }
+ if (sk->shutdown & RCV_SHUTDOWN)
+ {
+ release_sock(sk);
+ return(1);
+ }
+ release_sock(sk);
+ return(0);
+ case SEL_OUT:
+ select_wait(sk->sleep, wait);
+ if (sk->shutdown & SEND_SHUTDOWN)
+ {
+ /* FIXME: should this return an error? */
+ release_sock(sk);
+ return(0);
+ }
+
+ /*
+ * This is now right thanks to a small fix
+ * by Matt Dillon.
+ */
+
+ if (sk->prot->wspace(sk) >= sk->mtu+128+sk->prot->max_header)
+ {
+ release_sock(sk);
+ /* This should cause connect to work ok. */
+ if (sk->state == TCP_SYN_RECV ||
+ sk->state == TCP_SYN_SENT) return(0);
+ return(1);
+ }
+ release_sock(sk);
+ return(0);
+ case SEL_EX:
+ select_wait(sk->sleep,wait);
+ if (sk->err || sk->urg_data)
+ {
+ release_sock(sk);
+ return(1);
+ }
+ release_sock(sk);
+ return(0);
+ }
+
+ release_sock(sk);
+ return(0);
+}
+
+
+int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ int err;
+ switch(cmd)
+ {
+
+ case TIOCINQ:
+#ifdef FIXME /* FIXME: */
+ case FIONREAD:
+#endif
+ {
+ unsigned long amount;
+
+ if (sk->state == TCP_LISTEN)
+ return(-EINVAL);
+
+ sk->inuse = 1;
+ amount = tcp_readable(sk);
+ release_sock(sk);
+ err=verify_area(VERIFY_WRITE,(void *)arg,
+ sizeof(unsigned long));
+ if(err)
+ return err;
+ put_fs_long(amount,(unsigned long *)arg);
+ return(0);
+ }
+ case SIOCATMARK:
+ {
+ int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;
+
+ err = verify_area(VERIFY_WRITE,(void *) arg,
+ sizeof(unsigned long));
+ if (err)
+ return err;
+ put_fs_long(answ,(int *) arg);
+ return(0);
+ }
+ case TIOCOUTQ:
+ {
+ unsigned long amount;
+
+ if (sk->state == TCP_LISTEN) return(-EINVAL);
+ amount = sk->prot->wspace(sk);
+ err=verify_area(VERIFY_WRITE,(void *)arg,
+ sizeof(unsigned long));
+ if(err)
+ return err;
+ put_fs_long(amount,(unsigned long *)arg);
+ return(0);
+ }
+ default:
+ return(-EINVAL);
+ }
+}
+
+
+/*
+ * This routine computes a TCP checksum.
+ */
+
+unsigned short tcp_check(struct tcphdr *th, int len,
+ unsigned long saddr, unsigned long daddr)
+{
+ unsigned long sum;
+
+ if (saddr == 0) saddr = ip_my_addr();
+
+/*
+ * stupid, gcc complains when I use just one __asm__ block,
+ * something about too many reloads, but this is just two
+ * instructions longer than what I want
+ */
+ __asm__("
+ addl %%ecx, %%ebx
+ adcl %%edx, %%ebx
+ adcl $0, %%ebx
+ "
+ : "=b"(sum)
+ : "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
+ : "bx", "cx", "dx" );
+ __asm__("
+ movl %%ecx, %%edx
+ cld
+ cmpl $32, %%ecx
+ jb 2f
+ shrl $5, %%ecx
+ clc
+1: lodsl
+ adcl %%eax, %%ebx
+ lodsl
+ adcl %%eax, %%ebx
+ lodsl
+ adcl %%eax, %%ebx
+ lodsl
+ adcl %%eax, %%ebx
+ lodsl
+ adcl %%eax, %%ebx
+ lodsl
+ adcl %%eax, %%ebx
+ lodsl
+ adcl %%eax, %%ebx
+ lodsl
+ adcl %%eax, %%ebx
+ loop 1b
+ adcl $0, %%ebx
+ movl %%edx, %%ecx
+2: andl $28, %%ecx
+ je 4f
+ shrl $2, %%ecx
+ clc
+3: lodsl
+ adcl %%eax, %%ebx
+ loop 3b
+ adcl $0, %%ebx
+4: movl $0, %%eax
+ testw $2, %%dx
+ je 5f
+ lodsw
+ addl %%eax, %%ebx
+ adcl $0, %%ebx
+ movw $0, %%ax
+5: test $1, %%edx
+ je 6f
+ lodsb
+ addl %%eax, %%ebx
+ adcl $0, %%ebx
+6: movl %%ebx, %%eax
+ shrl $16, %%eax
+ addw %%ax, %%bx
+ adcw $0, %%bx
+ "
+ : "=b"(sum)
+ : "0"(sum), "c"(len), "S"(th)
+ : "ax", "bx", "cx", "dx", "si" );
+
+ /* We only want the bottom 16 bits, but we never cleared the top 16. */
+
+ return((~sum) & 0xffff);
+}
+
+
+
+void tcp_send_check(struct tcphdr *th, unsigned long saddr,
+ unsigned long daddr, int len, struct sock *sk)
+{
+ th->check = 0;
+ th->check = tcp_check(th, len, saddr, daddr);
+ return;
+}
+
+static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int size;
+ struct tcphdr * th = skb->h.th;
+
+ /* length of packet (not counting length of pre-tcp headers) */
+ size = skb->len - ((unsigned char *) th - skb->data);
+
+ /* sanity check it.. */
+ if (size < sizeof(struct tcphdr) || size > skb->len)
+ {
+ printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
+ skb, skb->data, th, skb->len);
+ kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+
+ /* If we have queued a header size packet.. */
+ if (size == sizeof(struct tcphdr))
+ {
+ /* If it's got a syn or fin it's notionally included in the size.. */
+ if(!th->syn && !th->fin)
+ {
+ printk("tcp_send_skb: attempt to queue a bogon.\n");
+ kfree_skb(skb,FREE_WRITE);
+ return;
+ }
+ }
+
+ tcp_statistics.TcpOutSegs++;
+
+ skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
+ if (after(skb->h.seq, sk->window_seq) ||
+ (sk->retransmits && sk->timeout == TIME_WRITE) ||
+ sk->packets_out >= sk->cong_window)
+ {
+ /* checksum will be supplied by tcp_write_xmit. So
+ * we shouldn't need to set it at all. I'm being paranoid */
+ th->check = 0;
+ if (skb->next != NULL)
+ {
+ printk("tcp_send_partial: next != NULL\n");
+ skb_unlink(skb);
+ }
+ skb_queue_tail(&sk->write_queue, skb);
+ if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
+ sk->send_head == NULL &&
+ sk->ack_backlog == 0)
+ reset_timer(sk, TIME_PROBE0, sk->rto);
+ }
+ else
+ {
+ th->ack_seq = ntohl(sk->acked_seq);
+ th->window = ntohs(tcp_select_window(sk));
+
+ tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
+
+ sk->sent_seq = sk->write_seq;
+ sk->prot->queue_xmit(sk, skb->dev, skb, 0);
+ }
+}
+
+struct sk_buff * tcp_dequeue_partial(struct sock * sk)
+{
+ struct sk_buff * skb;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ skb = sk->partial;
+ if (skb) {
+ sk->partial = NULL;
+ del_timer(&sk->partial_timer);
+ }
+ restore_flags(flags);
+ return skb;
+}
+
+static void tcp_send_partial(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ if (sk == NULL)
+ return;
+ while ((skb = tcp_dequeue_partial(sk)) != NULL)
+ tcp_send_skb(sk, skb);
+}
+
+void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
+{
+ struct sk_buff * tmp;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ tmp = sk->partial;
+ if (tmp)
+ del_timer(&sk->partial_timer);
+ sk->partial = skb;
+ init_timer(&sk->partial_timer);
+ sk->partial_timer.expires = HZ;
+ sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
+ sk->partial_timer.data = (unsigned long) sk;
+ add_timer(&sk->partial_timer);
+ restore_flags(flags);
+ if (tmp)
+ tcp_send_skb(sk, tmp);
+}
+
+
+/*
+ * This routine sends an ack and also updates the window.
+ */
+
+static void tcp_send_ack(unsigned long sequence, unsigned long ack,
+ struct sock *sk,
+ struct tcphdr *th, unsigned long daddr)
+{
+ struct sk_buff *buff;
+ struct tcphdr *t1;
+ struct device *dev = NULL;
+ int tmp;
+
+ if(sk->zapped)
+ return; /* We have been reset, we may not send again */
+ /*
+ * We need to grab some memory, and put together an ack,
+ * and then put it into the queue to be sent.
+ */
+
+ buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
+ if (buff == NULL)
+ {
+ /* Force it to send an ack. */
+ sk->ack_backlog++;
+ if (sk->timeout != TIME_WRITE && tcp_connected(sk->state))
+ {
+ reset_timer(sk, TIME_WRITE, 10);
+ }
+ return;
+ }
+
+ buff->len = sizeof(struct tcphdr);
+ buff->sk = sk;
+ buff->localroute = sk->localroute;
+ t1 =(struct tcphdr *) buff->data;
+
+ /* Put in the IP header and routing stuff. */
+ tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
+ IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0)
+ {
+ buff->free=1;
+ sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
+ return;
+ }
+ buff->len += tmp;
+ t1 =(struct tcphdr *)((char *)t1 +tmp);
+
+ /* FIXME: */
+ memcpy(t1, th, sizeof(*t1)); /* this should probably be removed */
+
+ /*
+ * Swap the send and the receive.
+ */
+
+ t1->dest = th->source;
+ t1->source = th->dest;
+ t1->seq = ntohl(sequence);
+ t1->ack = 1;
+ sk->window = tcp_select_window(sk);
+ t1->window = ntohs(sk->window);
+ t1->res1 = 0;
+ t1->res2 = 0;
+ t1->rst = 0;
+ t1->urg = 0;
+ t1->syn = 0;
+ t1->psh = 0;
+ t1->fin = 0;
+ if (ack == sk->acked_seq)
+ {
+ sk->ack_backlog = 0;
+ sk->bytes_rcv = 0;
+ sk->ack_timed = 0;
+ if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
+ && sk->timeout == TIME_WRITE)
+ {
+ if(sk->keepopen) {
+ reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
+ } else {
+ delete_timer(sk);
+ }
+ }
+ }
+ t1->ack_seq = ntohl(ack);
+ t1->doff = sizeof(*t1)/4;
+ tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
+ if (sk->debug)
+ printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
+ tcp_statistics.TcpOutSegs++;
+ sk->prot->queue_xmit(sk, dev, buff, 1);
+}
+
+
+/*
+ * This routine builds a generic TCP header.
+ */
+
+static int tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
+{
+
+ /* FIXME: want to get rid of this. */
+ memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
+ th->seq = htonl(sk->write_seq);
+ th->psh =(push == 0) ? 1 : 0;
+ th->doff = sizeof(*th)/4;
+ th->ack = 1;
+ th->fin = 0;
+ sk->ack_backlog = 0;
+ sk->bytes_rcv = 0;
+ sk->ack_timed = 0;
+ th->ack_seq = htonl(sk->acked_seq);
+ sk->window = tcp_select_window(sk);
+ th->window = htons(sk->window);
+
+ return(sizeof(*th));
+}
+
+/*
+ * This routine copies from a user buffer into a socket,
+ * and starts the transmit system.
+ */
+
+static int tcp_write(struct sock *sk, unsigned char *from,
+ int len, int nonblock, unsigned flags)
+{
+ int copied = 0;
+ int copy;
+ int tmp;
+ struct sk_buff *skb;
+ struct sk_buff *send_tmp;
+ unsigned char *buff;
+ struct proto *prot;
+ struct device *dev = NULL;
+
+ sk->inuse=1;
+ prot = sk->prot;
+ while(len > 0)
+ {
+ if (sk->err)
+ { /* Stop on an error */
+ release_sock(sk);
+ if (copied)
+ return(copied);
+ tmp = -sk->err;
+ sk->err = 0;
+ return(tmp);
+ }
+
+ /*
+ * First thing we do is make sure that we are established.
+ */
+
+ if (sk->shutdown & SEND_SHUTDOWN)
+ {
+ release_sock(sk);
+ sk->err = EPIPE;
+ if (copied)
+ return(copied);
+ sk->err = 0;
+ return(-EPIPE);
+ }
+
+
+ /*
+ * Wait for a connection to finish.
+ */
+
+ while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
+ {
+ if (sk->err)
+ {
+ release_sock(sk);
+ if (copied)
+ return(copied);
+ tmp = -sk->err;
+ sk->err = 0;
+ return(tmp);
+ }
+
+ if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
+ {
+ release_sock(sk);
+ if (copied)
+ return(copied);
+
+ if (sk->err)
+ {
+ tmp = -sk->err;
+ sk->err = 0;
+ return(tmp);
+ }
+
+ if (sk->keepopen)
+ {
+ send_sig(SIGPIPE, current, 0);
+ }
+ return(-EPIPE);
+ }
+
+ if (nonblock || copied)
+ {
+ release_sock(sk);
+ if (copied)
+ return(copied);
+ return(-EAGAIN);
+ }
+
+ release_sock(sk);
+ cli();
+
+ if (sk->state != TCP_ESTABLISHED &&
+ sk->state != TCP_CLOSE_WAIT && sk->err == 0)
+ {
+ interruptible_sleep_on(sk->sleep);
+ if (current->signal & ~current->blocked)
+ {
+ sti();
+ if (copied)
+ return(copied);
+ return(-ERESTARTSYS);
+ }
+ }
+ sk->inuse = 1;
+ sti();
+ }
+
+ /*
+ * The following code can result in copy <= 0 if sk->mss is ever
+ * decreased. It shouldn't be. sk->mss is min(sk->mtu, sk->max_window).
+ * sk->mtu is constant once SYN processing is finished. I.e. we
+ * had better not get here until we've seen his SYN and at least one
+ * valid ack. (The SYN sets sk->mtu and the ack sets sk->max_window.)
+ * But ESTABLISHED should guarantee that. sk->max_window is by definition
+ * non-decreasing. Note that any ioctl to set user_mss must be done
+ * before the exchange of SYN's. If the initial ack from the other
+ * end has a window of 0, max_window and thus mss will both be 0.
+ */
+
+ /*
+ * Now we need to check if we have a half built packet.
+ */
+
+ if ((skb = tcp_dequeue_partial(sk)) != NULL)
+ {
+ int hdrlen;
+
+ /* IP header + TCP header */
+ hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
+ + sizeof(struct tcphdr);
+
+ /* Add more stuff to the end of skb->len */
+ if (!(flags & MSG_OOB))
+ {
+ copy = min(sk->mss - (skb->len - hdrlen), len);
+ /* FIXME: this is really a bug. */
+ if (copy <= 0)
+ {
+ printk("TCP: **bug**: \"copy\" <= 0!!\n");
+ copy = 0;
+ }
+
+ memcpy_fromfs(skb->data + skb->len, from, copy);
+ skb->len += copy;
+ from += copy;
+ copied += copy;
+ len -= copy;
+ sk->write_seq += copy;
+ }
+ if ((skb->len - hdrlen) >= sk->mss ||
+ (flags & MSG_OOB) || !sk->packets_out)
+ tcp_send_skb(sk, skb);
+ else
+ tcp_enqueue_partial(skb, sk);
+ continue;
+ }
+
+ /*
+ * We also need to worry about the window.
+ * If window < 1/2 the maximum window we've seen from this
+ * host, don't use it. This is sender side
+ * silly window prevention, as specified in RFC1122.
+ * (Note that this is different from earlier versions of
+ * SWS prevention, e.g. RFC 813.) What we actually do is
+ * use the whole MSS. Since this results in the right
+ * edge of the packet being outside the window, it will
+ * be queued for later rather than sent.
+ */
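+ /*
+ * For example (illustrative numbers): with max_window = 8192,
+ * mss = 1460 and only 700 bytes of usable window left, we still
+ * cut a full 1460 byte segment; its right edge lies beyond
+ * window_seq, so it is queued rather than transmitted until the
+ * window opens up again.
+ */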
+
+ copy = sk->window_seq - sk->write_seq;
+ if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
+ copy = sk->mss;
+ if (copy > len)
+ copy = len;
+
+ /*
+ * We should really check the window here also.
+ */
+
+ send_tmp = NULL;
+ if (copy < sk->mss && !(flags & MSG_OOB))
+ {
+ /*
+ * We will release the socket in case we sleep here.
+ */
+ release_sock(sk);
+ /*
+ * NB: following must be mtu, because mss can be increased.
+ * mss is always <= mtu
+ */
+ skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header, 0, GFP_KERNEL);
+ sk->inuse = 1;
+ send_tmp = skb;
+ }
+ else
+ {
+ /*
+ * We will release the socket in case we sleep here.
+ */
+ release_sock(sk);
+ skb = prot->wmalloc(sk, copy + prot->max_header , 0, GFP_KERNEL);
+ sk->inuse = 1;
+ }
+
+ /*
+ * If we didn't get any memory, we need to sleep.
+ */
+
+ if (skb == NULL)
+ {
+ if (nonblock)
+ {
+ release_sock(sk);
+ if (copied)
+ return(copied);
+ return(-EAGAIN);
+ }
+
+ /*
+ * FIXME: here is another race condition.
+ */
+
+ tmp = sk->wmem_alloc;
+ release_sock(sk);
+ cli();
+ /*
+ * Again we will try to avoid it.
+ */
+ if (tmp <= sk->wmem_alloc &&
+ (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
+ && sk->err == 0)
+ {
+ interruptible_sleep_on(sk->sleep);
+ if (current->signal & ~current->blocked)
+ {
+ sti();
+ if (copied)
+ return(copied);
+ return(-ERESTARTSYS);
+ }
+ }
+ sk->inuse = 1;
+ sti();
+ continue;
+ }
+
+ skb->len = 0;
+ skb->sk = sk;
+ skb->free = 0;
+ skb->localroute = sk->localroute|(flags&MSG_DONTROUTE);
+
+ buff = skb->data;
+
+ /*
+ * FIXME: we need to optimize this.
+ * Perhaps some hints here would be good.
+ */
+
+ tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
+ IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0 )
+ {
+ prot->wfree(sk, skb->mem_addr, skb->mem_len);
+ release_sock(sk);
+ if (copied)
+ return(copied);
+ return(tmp);
+ }
+ skb->len += tmp;
+ skb->dev = dev;
+ buff += tmp;
+ skb->h.th =(struct tcphdr *) buff;
+ tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
+ if (tmp < 0)
+ {
+ prot->wfree(sk, skb->mem_addr, skb->mem_len);
+ release_sock(sk);
+ if (copied)
+ return(copied);
+ return(tmp);
+ }
+
+ if (flags & MSG_OOB)
+ {
+ ((struct tcphdr *)buff)->urg = 1;
+ ((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
+ }
+ skb->len += tmp;
+ memcpy_fromfs(buff+tmp, from, copy);
+
+ from += copy;
+ copied += copy;
+ len -= copy;
+ skb->len += copy;
+ skb->free = 0;
+ sk->write_seq += copy;
+
+ if (send_tmp != NULL && sk->packets_out)
+ {
+ tcp_enqueue_partial(send_tmp, sk);
+ continue;
+ }
+ tcp_send_skb(sk, skb);
+ }
+ sk->err = 0;
+
+/*
+ * Nagle's rule. Turn Nagle off with TCP_NODELAY for highly
+ * interactive fast network servers. It's meant to be on and
+ * it really improves the throughput though not the echo time
+ * on my slow slip link - Alan
+ */
+
+/*
+ * Avoid possible race on send_tmp - c/o Johannes Stille
+ */
+
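+/*
+ * In other words: flush the half-built segment only when nothing is
+ * in flight (classic Nagle behaviour), or when TCP_NODELAY is set and
+ * the data still fits inside the offered window.
+ */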
+ if(sk->partial && ((!sk->packets_out)
+ /* If not nagling we can send on the before case too.. */
+ || (sk->nonagle && before(sk->write_seq , sk->window_seq))
+ ))
+ tcp_send_partial(sk);
+
+ release_sock(sk);
+ return(copied);
+}
+
+
+static int tcp_sendto(struct sock *sk, unsigned char *from,
+ int len, int nonblock, unsigned flags,
+ struct sockaddr_in *addr, int addr_len)
+{
+ if (flags & ~(MSG_OOB|MSG_DONTROUTE))
+ return -EINVAL;
+ if (sk->state == TCP_CLOSE)
+ return -ENOTCONN;
+ if (addr_len < sizeof(*addr))
+ return -EINVAL;
+ if (addr->sin_family && addr->sin_family != AF_INET)
+ return -EINVAL;
+ if (addr->sin_port != sk->dummy_th.dest)
+ return -EISCONN;
+ if (addr->sin_addr.s_addr != sk->daddr)
+ return -EISCONN;
+ return tcp_write(sk, from, len, nonblock, flags);
+}
+
+
+static void tcp_read_wakeup(struct sock *sk)
+{
+ int tmp;
+ struct device *dev = NULL;
+ struct tcphdr *t1;
+ struct sk_buff *buff;
+
+ if (!sk->ack_backlog)
+ return;
+
+ /*
+ * FIXME: we need to put code here to prevent this routine from
+ * being called. Being called once in a while is ok, so only check
+ * if this is the second time in a row.
+ */
+
+ /*
+ * We need to grab some memory, and put together an ack,
+ * and then put it into the queue to be sent.
+ */
+
+ buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
+ if (buff == NULL)
+ {
+ /* Try again real soon. */
+ reset_timer(sk, TIME_WRITE, 10);
+ return;
+ }
+
+ buff->len = sizeof(struct tcphdr);
+ buff->sk = sk;
+ buff->localroute = sk->localroute;
+
+ /*
+ * Put in the IP header and routing stuff.
+ */
+
+ tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
+ IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0)
+ {
+ buff->free=1;
+ sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
+ return;
+ }
+
+ buff->len += tmp;
+ t1 =(struct tcphdr *)(buff->data +tmp);
+
+ memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
+ t1->seq = htonl(sk->sent_seq);
+ t1->ack = 1;
+ t1->res1 = 0;
+ t1->res2 = 0;
+ t1->rst = 0;
+ t1->urg = 0;
+ t1->syn = 0;
+ t1->psh = 0;
+ sk->ack_backlog = 0;
+ sk->bytes_rcv = 0;
+ sk->window = tcp_select_window(sk);
+ t1->window = ntohs(sk->window);
+ t1->ack_seq = ntohl(sk->acked_seq);
+ t1->doff = sizeof(*t1)/4;
+ tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
+ sk->prot->queue_xmit(sk, dev, buff, 1);
+ tcp_statistics.TcpOutSegs++;
+}
+
+
+/*
+ * FIXME:
+ * This routine frees used buffers.
+ * It should consider sending an ACK to let the
+ * other end know we now have a bigger window.
+ */
+
+static void cleanup_rbuf(struct sock *sk)
+{
+ unsigned long flags;
+ unsigned long left;
+ struct sk_buff *skb;
+ unsigned long rspace;
+
+ if(sk->debug)
+ printk("cleaning rbuf for sk=%p\n", sk);
+
+ save_flags(flags);
+ cli();
+
+ left = sk->prot->rspace(sk);
+
+ /*
+ * We have to loop through all the buffer headers,
+ * and try to free up all the space we can.
+ */
+
+ while((skb=skb_peek(&sk->receive_queue)) != NULL)
+ {
+ if (!skb->used)
+ break;
+ skb_unlink(skb);
+ skb->sk = sk;
+ kfree_skb(skb, FREE_READ);
+ }
+
+ restore_flags(flags);
+
+ /*
+ * FIXME:
+ * At this point we should send an ack if the difference
+ * in the window, and the amount of space is bigger than
+ * TCP_WINDOW_DIFF.
+ */
+
+ if(sk->debug)
+ printk("sk->rspace = %lu, was %lu\n", sk->prot->rspace(sk),
+ left);
+ if ((rspace=sk->prot->rspace(sk)) != left)
+ {
+ /*
+ * This area has caused the most trouble. The current strategy
+ * is to simply do nothing if the other end has room to send at
+ * least 3 full packets, because the ack from those will auto-
+ * matically update the window. If the other end doesn't think
+ * we have much space left, but we have room for at least 1 more
+ * complete packet than it thinks we do, we will send an ack
+ * immediately. Otherwise we will wait up to .5 seconds in case
+ * the user reads some more.
+ */
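+ /*
+ * For example (illustrative numbers, assuming an Ethernet-sized
+ * MTU): if we advertised a 4096 byte window, have since received
+ * 4096 bytes, and the reader has now freed 2000 bytes of buffer,
+ * then 2000 exceeds the 0 bytes the peer thinks remain by more
+ * than one MTU, so we ack immediately instead of waiting.
+ */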
+ sk->ack_backlog++;
+ /*
+ * It's unclear whether to use sk->mtu or sk->mss here. They differ only
+ * if the other end is offering a window smaller than the agreed on MSS
+ * (called sk->mtu here). In theory there's no connection between send
+ * and receive, and so no reason to think that they're going to send
+ * small packets. For the moment I'm using the hack of reducing the mss
+ * only on the send side, so I'm putting mtu here.
+ */
+
+ if (rspace > (sk->window - sk->bytes_rcv + sk->mtu))
+ {
+ /* Send an ack right now. */
+ tcp_read_wakeup(sk);
+ }
+ else
+ {
+ /* Force it to send an ack soon. */
+ int was_active = del_timer(&sk->timer);
+ if (!was_active || TCP_ACK_TIME < sk->timer.expires)
+ {
+ reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
+ }
+ else
+ add_timer(&sk->timer);
+ }
+ }
+}
+
+
+/*
+ * Handle reading urgent data.
+ */
+
+static int tcp_read_urg(struct sock * sk, int nonblock,
+ unsigned char *to, int len, unsigned flags)
+{
+ if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
+ return -EINVAL;
+ if (sk->err)
+ {
+ int tmp = -sk->err;
+ sk->err = 0;
+ return tmp;
+ }
+
+ if (sk->state == TCP_CLOSE || sk->done)
+ {
+ if (!sk->done) {
+ sk->done = 1;
+ return 0;
+ }
+ return -ENOTCONN;
+ }
+
+ if (sk->shutdown & RCV_SHUTDOWN)
+ {
+ sk->done = 1;
+ return 0;
+ }
+ sk->inuse = 1;
+ if (sk->urg_data & URG_VALID)
+ {
+ char c = sk->urg_data;
+ if (!(flags & MSG_PEEK))
+ sk->urg_data = URG_READ;
+ put_fs_byte(c, to);
+ release_sock(sk);
+ return 1;
+ }
+ release_sock(sk);
+
+ /*
+ * Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
+ * the available implementations agree in this case:
+ * this call should never block, independent of the
+ * blocking state of the socket.
+ * Mike <pall@rz.uni-karlsruhe.de>
+ */
+ return -EAGAIN;
+}
+
+
+/*
+ * This routine copies from a sock struct into the user buffer.
+ */
+
+static int tcp_read(struct sock *sk, unsigned char *to,
+ int len, int nonblock, unsigned flags)
+{
+ struct wait_queue wait = { current, NULL };
+ int copied = 0;
+ unsigned long peek_seq;
+ unsigned long *seq;
+ unsigned long used;
+
+ /* This error should be checked. */
+ if (sk->state == TCP_LISTEN)
+ return -ENOTCONN;
+
+ /* Urgent data needs to be handled specially. */
+ if (flags & MSG_OOB)
+ return tcp_read_urg(sk, nonblock, to, len, flags);
+
+ peek_seq = sk->copied_seq;
+ seq = &sk->copied_seq;
+ if (flags & MSG_PEEK)
+ seq = &peek_seq;
+
+ add_wait_queue(sk->sleep, &wait);
+ sk->inuse = 1;
+ while (len > 0)
+ {
+ struct sk_buff * skb;
+ unsigned long offset;
+
+ /*
+ * are we at urgent data? Stop if we have read anything.
+ */
+ if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
+ break;
+
+ current->state = TASK_INTERRUPTIBLE;
+
+ skb = skb_peek(&sk->receive_queue);
+ do
+ {
+ if (!skb)
+ break;
+ if (before(1+*seq, skb->h.th->seq))
+ break;
+ offset = 1 + *seq - skb->h.th->seq;
+ if (skb->h.th->syn)
+ offset--;
+ if (offset < skb->len)
+ goto found_ok_skb;
+ if (!(flags & MSG_PEEK))
+ skb->used = 1;
+ skb = skb->next;
+ }
+ while (skb != (struct sk_buff *)&sk->receive_queue);
+
+ if (copied)
+ break;
+
+ if (sk->err)
+ {
+ copied = -sk->err;
+ sk->err = 0;
+ break;
+ }
+
+ if (sk->state == TCP_CLOSE)
+ {
+ if (!sk->done)
+ {
+ sk->done = 1;
+ break;
+ }
+ copied = -ENOTCONN;
+ break;
+ }
+
+ if (sk->shutdown & RCV_SHUTDOWN)
+ {
+ sk->done = 1;
+ break;
+ }
+
+ if (nonblock)
+ {
+ copied = -EAGAIN;
+ break;
+ }
+
+ cleanup_rbuf(sk);
+ release_sock(sk);
+ schedule();
+ sk->inuse = 1;
+
+ if (current->signal & ~current->blocked)
+ {
+ copied = -ERESTARTSYS;
+ break;
+ }
+ continue;
+
+ found_ok_skb:
+ /* Ok so how much can we use ? */
+ used = skb->len - offset;
+ if (len < used)
+ used = len;
+ /* do we have urgent data here? */
+ if (sk->urg_data)
+ {
+ unsigned long urg_offset = sk->urg_seq - (1 + *seq);
+ if (urg_offset < used)
+ {
+ if (!urg_offset)
+ {
+ if (!sk->urginline)
+ {
+ ++*seq;
+ offset++;
+ used--;
+ }
+ }
+ else
+ used = urg_offset;
+ }
+ }
+ /* Copy it */
+ memcpy_tofs(to,((unsigned char *)skb->h.th) +
+ skb->h.th->doff*4 + offset, used);
+ copied += used;
+ len -= used;
+ to += used;
+ *seq += used;
+ if (after(sk->copied_seq+1,sk->urg_seq))
+ sk->urg_data = 0;
+ if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
+ skb->used = 1;
+ }
+ remove_wait_queue(sk->sleep, &wait);
+ current->state = TASK_RUNNING;
+
+ /* Clean up data we have read: This will do ACK frames */
+ cleanup_rbuf(sk);
+ release_sock(sk);
+ return copied;
+}
+
+
+/*
+ * Shutdown the sending side of a connection.
+ */
+
+void tcp_shutdown(struct sock *sk, int how)
+{
+ struct sk_buff *buff;
+ struct tcphdr *t1, *th;
+ struct proto *prot;
+ int tmp;
+ struct device *dev = NULL;
+
+ /*
+ * We need to grab some memory, and put together a FIN,
+ * and then put it into the queue to be sent.
+ * FIXME:
+ *
+ * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
+ * Most of this is guesswork, so maybe it will work...
+ */
+
+ if (!(how & SEND_SHUTDOWN))
+ return;
+
+ /*
+ * If we've already sent a FIN, return.
+ */
+
+ if (sk->state == TCP_FIN_WAIT1 ||
+ sk->state == TCP_FIN_WAIT2 ||
+ sk->state == TCP_CLOSING ||
+ sk->state == TCP_LAST_ACK ||
+ sk->state == TCP_TIME_WAIT
+ )
+ {
+ return;
+ }
+ sk->inuse = 1;
+
+ /*
+ * Flag that the sender has shut down.
+ */
+
+ sk->shutdown |= SEND_SHUTDOWN;
+
+ /*
+ * Clear out any half completed packets.
+ */
+
+ if (sk->partial)
+ tcp_send_partial(sk);
+
+ prot =(struct proto *)sk->prot;
+ th =(struct tcphdr *)&sk->dummy_th;
+ release_sock(sk); /* in case the malloc sleeps. */
+ buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
+ if (buff == NULL)
+ return;
+ sk->inuse = 1;
+
+ buff->sk = sk;
+ buff->len = sizeof(*t1);
+ buff->localroute = sk->localroute;
+ t1 =(struct tcphdr *) buff->data;
+
+ /*
+ * Put in the IP header and routing stuff.
+ */
+
+ tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
+ IPPROTO_TCP, sk->opt,
+ sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0)
+ {
+ /*
+ * Finish anyway, treat this as a send that got lost.
+ *
+ * Enter FIN_WAIT1 on normal shutdown, which waits for
+ * written data to be completely acknowledged along
+ * with an acknowledge to our FIN.
+ *
+ * Enter FIN_WAIT2 on abnormal shutdown -- close before
+ * connection established.
+ */
+ buff->free=1;
+ prot->wfree(sk,buff->mem_addr, buff->mem_len);
+
+ if (sk->state == TCP_ESTABLISHED)
+ tcp_set_state(sk,TCP_FIN_WAIT1);
+ else if(sk->state == TCP_CLOSE_WAIT)
+ tcp_set_state(sk,TCP_LAST_ACK);
+ else
+ tcp_set_state(sk,TCP_FIN_WAIT2);
+
+ release_sock(sk);
+ return;
+ }
+
+ t1 =(struct tcphdr *)((char *)t1 +tmp);
+ buff->len += tmp;
+ buff->dev = dev;
+ memcpy(t1, th, sizeof(*t1));
+ t1->seq = ntohl(sk->write_seq);
+ sk->write_seq++;
+ buff->h.seq = sk->write_seq;
+ t1->ack = 1;
+ t1->ack_seq = ntohl(sk->acked_seq);
+ t1->window = ntohs(sk->window=tcp_select_window(sk));
+ t1->fin = 1;
+ t1->rst = 0;
+ t1->doff = sizeof(*t1)/4;
+ tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
+
+ /*
+ * If there is data in the write queue, the fin must be appended to
+ * the write queue.
+ */
+
+ if (skb_peek(&sk->write_queue) != NULL)
+ {
+ buff->free=0;
+ if (buff->next != NULL)
+ {
+ printk("tcp_shutdown: next != NULL\n");
+ skb_unlink(buff);
+ }
+ skb_queue_tail(&sk->write_queue, buff);
+ }
+ else
+ {
+ sk->sent_seq = sk->write_seq;
+ sk->prot->queue_xmit(sk, dev, buff, 0);
+ }
+
+ if (sk->state == TCP_ESTABLISHED)
+ tcp_set_state(sk,TCP_FIN_WAIT1);
+ else if (sk->state == TCP_CLOSE_WAIT)
+ tcp_set_state(sk,TCP_LAST_ACK);
+ else
+ tcp_set_state(sk,TCP_FIN_WAIT2);
+
+ release_sock(sk);
+}
+
+
+static int
+tcp_recvfrom(struct sock *sk, unsigned char *to,
+ int to_len, int nonblock, unsigned flags,
+ struct sockaddr_in *addr, int *addr_len)
+{
+ int result;
+
+ /*
+ * Have to check these first, unlike the old code. If we
+ * checked them after the read we could lose data on an
+ * error, which would be wrong.
+ */
+
+ if(addr_len)
+ *addr_len = sizeof(*addr);
+ result=tcp_read(sk, to, to_len, nonblock, flags);
+
+ if (result < 0)
+ return(result);
+
+ if(addr)
+ {
+ addr->sin_family = AF_INET;
+ addr->sin_port = sk->dummy_th.dest;
+ addr->sin_addr.s_addr = sk->daddr;
+ }
+ return(result);
+}
+
+
+/*
+ * This routine will send an RST to the other tcp.
+ */
+
+static void tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
+ struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
+{
+ struct sk_buff *buff;
+ struct tcphdr *t1;
+ int tmp;
+ struct device *ndev=NULL;
+
+/*
+ * We need to grab some memory, and put together an RST,
+ * and then put it into the queue to be sent.
+ */
+
+ buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
+ if (buff == NULL)
+ return;
+
+ buff->len = sizeof(*t1);
+ buff->sk = NULL;
+ buff->dev = dev;
+ buff->localroute = 0;
+
+ t1 =(struct tcphdr *) buff->data;
+
+ /*
+ * Put in the IP header and routing stuff.
+ */
+
+ tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
+ sizeof(struct tcphdr),tos,ttl);
+ if (tmp < 0)
+ {
+ buff->free = 1;
+ prot->wfree(NULL, buff->mem_addr, buff->mem_len);
+ return;
+ }
+
+ t1 =(struct tcphdr *)((char *)t1 +tmp);
+ buff->len += tmp;
+ memcpy(t1, th, sizeof(*t1));
+
+ /*
+ * Swap the send and the receive.
+ */
+
+ t1->dest = th->source;
+ t1->source = th->dest;
+ t1->rst = 1;
+ t1->window = 0;
+
+ if(th->ack)
+ {
+ t1->ack = 0;
+ t1->seq = th->ack_seq;
+ t1->ack_seq = 0;
+ }
+ else
+ {
+ t1->ack = 1;
+ if(!th->syn)
+ t1->ack_seq=htonl(th->seq);
+ else
+ t1->ack_seq=htonl(th->seq+1);
+ t1->seq=0;
+ }
+
+ t1->syn = 0;
+ t1->urg = 0;
+ t1->fin = 0;
+ t1->psh = 0;
+ t1->doff = sizeof(*t1)/4;
+ tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
+ prot->queue_xmit(NULL, ndev, buff, 1);
+ tcp_statistics.TcpOutSegs++;
+}
+
+
+/*
+ * Look for tcp options. Parses everything but only knows about MSS.
+ * This routine is normally called with the packet containing the SYN,
+ * but it may also be called with the ack to that SYN. So you
+ * can't assume this is always the SYN. It's always called after
+ * we have set up sk->mtu to our own MTU.
+ */
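+/*
+ * The only option handled, MSS, is laid out on the wire as
+ * kind = 2, length = 4, followed by a 16-bit MSS in network order
+ * (RFC 793); any other option is skipped over using its length byte.
+ */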
+
+static void tcp_options(struct sock *sk, struct tcphdr *th)
+{
+ unsigned char *ptr;
+ int length=(th->doff*4)-sizeof(struct tcphdr);
+ int mss_seen = 0;
+
+ ptr = (unsigned char *)(th + 1);
+
+ while(length>0)
+ {
+ int opcode=*ptr++;
+ int opsize=*ptr++;
+ switch(opcode)
+ {
+ case TCPOPT_EOL:
+ return;
+ case TCPOPT_NOP:
+ length-=2;
+ continue;
+
+ default:
+ if(opsize<=2) /* Avoid silly options looping forever */
+ return;
+ switch(opcode)
+ {
+ case TCPOPT_MSS:
+ if(opsize==4 && th->syn)
+ {
+ sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
+ mss_seen = 1;
+ }
+ break;
+ /* Add other options here as people feel the urge to implement stuff like large windows */
+ }
+ ptr+=opsize-2;
+ length-=opsize;
+ }
+ }
+ if (th->syn)
+ {
+ if (! mss_seen)
+ sk->mtu=min(sk->mtu, 536); /* default MSS if none sent */
+ }
+#ifdef CONFIG_INET_PCTCP
+ sk->mss = min(sk->max_window >> 1, sk->mtu);
+#else
+ sk->mss = min(sk->max_window, sk->mtu);
+#endif
+}
+
+static inline unsigned long default_mask(unsigned long dst)
+{
+ dst = ntohl(dst);
+ if (IN_CLASSA(dst))
+ return htonl(IN_CLASSA_NET);
+ if (IN_CLASSB(dst))
+ return htonl(IN_CLASSB_NET);
+ return htonl(IN_CLASSC_NET);
+}
+
+/*
+ * Default sequence number picking algorithm.
+ */
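+/*
+ * This follows the spirit of the RFC 793 initial sequence number
+ * generator: a value derived from a clock, so that the sequence spaces
+ * of successive incarnations of a connection do not overlap. Here the
+ * clock is jiffies scaled by SEQ_TICK.
+ */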
+
+extern inline long tcp_init_seq(void)
+{
+ return jiffies * SEQ_TICK - seq_offset;
+}
+
+/*
+ * This routine handles a connection request.
+ * It should make sure we haven't already responded.
+ * Because of the way BSD works, we have to send a syn/ack now.
+ * This also means it will be harder to close a socket which is
+ * listening.
+ */
+
+static void tcp_conn_request(struct sock *sk, struct sk_buff *skb,
+ unsigned long daddr, unsigned long saddr,
+ struct options *opt, struct device *dev, unsigned long seq)
+{
+ struct sk_buff *buff;
+ struct tcphdr *t1;
+ unsigned char *ptr;
+ struct sock *newsk;
+ struct tcphdr *th;
+ struct device *ndev=NULL;
+ int tmp;
+ struct rtable *rt;
+
+ th = skb->h.th;
+
+ /* If the socket is dead, don't accept the connection. */
+ if (!sk->dead)
+ {
+ sk->data_ready(sk,0);
+ }
+ else
+ {
+ if(sk->debug)
+ printk("Reset on %p: Connect on dead socket.\n",sk);
+ tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
+ tcp_statistics.TcpAttemptFails++;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Make sure we can accept more. This will prevent a
+ * flurry of syns from eating up all our memory.
+ */
+
+ if (sk->ack_backlog >= sk->max_ack_backlog)
+ {
+ tcp_statistics.TcpAttemptFails++;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * We need to build a new sock struct.
+ * It is sort of bad to have a socket without an inode attached
+ * to it, but the wake_up's will just wake up the listening socket,
+ * and if the listening socket is destroyed before this is taken
+ * off of the queue, this will take care of it.
+ */
+
+ newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
+ if (newsk == NULL)
+ {
+ /* just ignore the syn. It will get retransmitted. */
+ tcp_statistics.TcpAttemptFails++;
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ memcpy(newsk, sk, sizeof(*newsk));
+ skb_queue_head_init(&newsk->write_queue);
+ skb_queue_head_init(&newsk->receive_queue);
+ newsk->send_head = NULL;
+ newsk->send_tail = NULL;
+ skb_queue_head_init(&newsk->back_log);
+ newsk->rtt = 0; /*TCP_CONNECT_TIME<<3*/
+ newsk->rto = TCP_TIMEOUT_INIT;
+ newsk->mdev = 0;
+ newsk->max_window = 0;
+ newsk->cong_window = 1;
+ newsk->cong_count = 0;
+ newsk->ssthresh = 0;
+ newsk->backoff = 0;
+ newsk->blog = 0;
+ newsk->intr = 0;
+ newsk->proc = 0;
+ newsk->done = 0;
+ newsk->partial = NULL;
+ newsk->pair = NULL;
+ newsk->wmem_alloc = 0;
+ newsk->rmem_alloc = 0;
+ newsk->localroute = sk->localroute;
+
+ newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;
+
+ newsk->err = 0;
+ newsk->shutdown = 0;
+ newsk->ack_backlog = 0;
+ newsk->acked_seq = skb->h.th->seq+1;
+ newsk->fin_seq = skb->h.th->seq;
+ newsk->copied_seq = skb->h.th->seq;
+ newsk->state = TCP_SYN_RECV;
+ newsk->timeout = 0;
+ newsk->write_seq = seq;
+ newsk->window_seq = newsk->write_seq;
+ newsk->rcv_ack_seq = newsk->write_seq;
+ newsk->urg_data = 0;
+ newsk->retransmits = 0;
+ newsk->destroy = 0;
+ init_timer(&newsk->timer);
+ newsk->timer.data = (unsigned long)newsk;
+ newsk->timer.function = &net_timer;
+ newsk->dummy_th.source = skb->h.th->dest;
+ newsk->dummy_th.dest = skb->h.th->source;
+
+ /*
+ * Swap these two, they are from our point of view.
+ */
+
+ newsk->daddr = saddr;
+ newsk->saddr = daddr;
+
+ put_sock(newsk->num,newsk);
+ newsk->dummy_th.res1 = 0;
+ newsk->dummy_th.doff = 6;
+ newsk->dummy_th.fin = 0;
+ newsk->dummy_th.syn = 0;
+ newsk->dummy_th.rst = 0;
+ newsk->dummy_th.psh = 0;
+ newsk->dummy_th.ack = 0;
+ newsk->dummy_th.urg = 0;
+ newsk->dummy_th.res2 = 0;
+ newsk->acked_seq = skb->h.th->seq + 1;
+ newsk->copied_seq = skb->h.th->seq;
+ newsk->socket = NULL;
+
+ /*
+ * Grab the ttl and tos values and use them
+ */
+
+ newsk->ip_ttl=sk->ip_ttl;
+ newsk->ip_tos=skb->ip_hdr->tos;
+
+ /*
+ * Use the user's requested MSS if any, otherwise pick a default
+ */
+
+ /*
+ * Note use of sk->user_mss, since user has no direct access to newsk
+ */
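+ /*
+ * MSS selection for the new socket, in order of preference: an
+ * explicit user_mss from the listener, the route's MSS, or a default
+ * of 576 less headers when the peer is off our (sub)net and
+ * MAX_WINDOW when it is local. The result is then clamped to the
+ * device MTU and, via the call to tcp_options(), to whatever the
+ * peer's own MSS option allows.
+ */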
+
+ rt=ip_rt_route(saddr, NULL,NULL);
+
+ if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
+ newsk->window_clamp = rt->rt_window;
+ else
+ newsk->window_clamp = 0;
+
+ if (sk->user_mss)
+ newsk->mtu = sk->user_mss;
+ else if(rt!=NULL && (rt->rt_flags&RTF_MSS))
+ newsk->mtu = rt->rt_mss - HEADER_SIZE;
+ else
+ {
+#ifdef CONFIG_INET_SNARL /* Sub Nets Are Local */
+ if ((saddr ^ daddr) & default_mask(saddr))
+#else
+ if ((saddr ^ daddr) & dev->pa_mask)
+#endif
+ newsk->mtu = 576 - HEADER_SIZE;
+ else
+ newsk->mtu = MAX_WINDOW;
+ }
+
+ /*
+ * But not bigger than device MTU
+ */
+
+ newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);
+
+ /*
+ * This will min with what arrived in the packet
+ */
+
+ tcp_options(newsk,skb->h.th);
+
+ buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
+ if (buff == NULL)
+ {
+ sk->err = -ENOMEM;
+ newsk->dead = 1;
+ release_sock(newsk);
+ kfree_skb(skb, FREE_READ);
+ tcp_statistics.TcpAttemptFails++;
+ return;
+ }
+
+ buff->len = sizeof(struct tcphdr)+4;
+ buff->sk = newsk;
+ buff->localroute = newsk->localroute;
+
+ t1 =(struct tcphdr *) buff->data;
+
+ /*
+ * Put in the IP header and routing stuff.
+ */
+
+ tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
+ IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
+
+ /*
+ * Something went wrong.
+ */
+
+ if (tmp < 0)
+ {
+ sk->err = tmp;
+ buff->free=1;
+ kfree_skb(buff,FREE_WRITE);
+ newsk->dead = 1;
+ release_sock(newsk);
+ skb->sk = sk;
+ kfree_skb(skb, FREE_READ);
+ tcp_statistics.TcpAttemptFails++;
+ return;
+ }
+
+ buff->len += tmp;
+ t1 =(struct tcphdr *)((char *)t1 +tmp);
+
+ memcpy(t1, skb->h.th, sizeof(*t1));
+ buff->h.seq = newsk->write_seq;
+ /*
+ * Swap the send and the receive.
+ */
+ t1->dest = skb->h.th->source;
+ t1->source = newsk->dummy_th.source;
+ t1->seq = ntohl(newsk->write_seq++);
+ t1->ack = 1;
+ newsk->window = tcp_select_window(newsk);
+ newsk->sent_seq = newsk->write_seq;
+ t1->window = ntohs(newsk->window);
+ t1->res1 = 0;
+ t1->res2 = 0;
+ t1->rst = 0;
+ t1->urg = 0;
+ t1->psh = 0;
+ t1->syn = 1;
+ t1->ack_seq = ntohl(skb->h.th->seq+1);
+ t1->doff = sizeof(*t1)/4+1;
+ ptr =(unsigned char *)(t1+1);
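+ /* TCP MSS option: kind 2, length 4, 16-bit MSS value. */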
+ ptr[0] = 2;
+ ptr[1] = 4;
+ ptr[2] = ((newsk->mtu) >> 8) & 0xff;
+ ptr[3] =(newsk->mtu) & 0xff;
+
+ tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
+ newsk->prot->queue_xmit(newsk, ndev, buff, 0);
+
+ reset_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
+ skb->sk = newsk;
+
+ /*
+ * Charge the sock_buff to newsk.
+ */
+
+ sk->rmem_alloc -= skb->mem_len;
+ newsk->rmem_alloc += skb->mem_len;
+
+ skb_queue_tail(&sk->receive_queue,skb);
+ sk->ack_backlog++;
+ release_sock(newsk);
+ tcp_statistics.TcpOutSegs++;
+}
+
+
+static void tcp_close(struct sock *sk, int timeout)
+{
+ struct sk_buff *buff;
+ struct tcphdr *t1, *th;
+ struct proto *prot;
+ struct device *dev=NULL;
+ int tmp;
+
+ /*
+ * We need to grab some memory, and put together a FIN,
+ * and then put it into the queue to be sent.
+ */
+ sk->inuse = 1;
+ sk->keepopen = 1;
+ sk->shutdown = SHUTDOWN_MASK;
+
+ if (!sk->dead)
+ sk->state_change(sk);
+
+ if (timeout == 0)
+ {
+ /*
+ * We need to flush the recv. buffs. We do this only on the
+ * descriptor close, not protocol-sourced closes, because the
+ * reader process may not have drained the data yet!
+ */
+
+ if (skb_peek(&sk->receive_queue) != NULL)
+ {
+ struct sk_buff *skb;
+ if(sk->debug)
+ printk("Clean rcv queue\n");
+ while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
+ kfree_skb(skb, FREE_READ);
+ if(sk->debug)
+ printk("Cleaned.\n");
+ }
+ }
+
+ /*
+ * Get rid of any half-completed packets.
+ */
+
+ if (sk->partial)
+ {
+ tcp_send_partial(sk);
+ }
+
+ switch(sk->state)
+ {
+ case TCP_FIN_WAIT1:
+ case TCP_FIN_WAIT2:
+ case TCP_CLOSING:
+ /*
+ * These states occur when we have already closed out
+ * our end. If there is no timeout, we do not do
+ * anything. We may still be in the middle of sending
+ * the remainder of our buffer, for example...
+ * resetting the timer would be inappropriate.
+ *
+ * XXX if retransmit count reaches limit, is tcp_close()
+ * called with timeout == 1 ? if not, we need to fix that.
+ */
+ if (!timeout) {
+ int timer_active;
+
+ timer_active = del_timer(&sk->timer);
+ if (timer_active)
+ add_timer(&sk->timer);
+ else
+ reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
+ }
+ if (timeout)
+ tcp_time_wait(sk);
+ release_sock(sk);
+ return; /* break causes a double release - messy */
+ case TCP_TIME_WAIT:
+ case TCP_LAST_ACK:
+ /*
+ * A timeout from these states terminates the TCB.
+ */
+ if (timeout)
+ {
+ tcp_set_state(sk,TCP_CLOSE);
+ }
+ release_sock(sk);
+ return;
+ case TCP_LISTEN:
+ /* we need to drop any sockets which have been connected,
+ but have not yet been accepted. */
+ tcp_close_pending(sk, timeout);
+ tcp_set_state(sk,TCP_CLOSE);
+ release_sock(sk);
+ return;
+ case TCP_CLOSE:
+ release_sock(sk);
+ return;
+ case TCP_CLOSE_WAIT:
+ case TCP_ESTABLISHED:
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ prot =(struct proto *)sk->prot;
+ th =(struct tcphdr *)&sk->dummy_th;
+ buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
+ if (buff == NULL)
+ {
+ /* This will force it to try again later. */
+ /* Or it would have if someone released the socket
+ first. Anyway it might work now */
+ release_sock(sk);
+ if (sk->state != TCP_CLOSE_WAIT)
+ tcp_set_state(sk,TCP_ESTABLISHED);
+ reset_timer(sk, TIME_CLOSE, 100);
+ return;
+ }
+ buff->sk = sk;
+ buff->free = 1;
+ buff->len = sizeof(*t1);
+ buff->localroute = sk->localroute;
+ t1 =(struct tcphdr *) buff->data;
+
+ /*
+ * Put in the IP header and routing stuff.
+ */
+ tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
+ IPPROTO_TCP, sk->opt,
+ sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0)
+ {
+ sk->write_seq++; /* Very important 8) */
+ kfree_skb(buff,FREE_WRITE);
+
+ /*
+ * Enter FIN_WAIT1 to await completion of
+ * written out data and ACK to our FIN.
+ */
+
+ if(sk->state==TCP_ESTABLISHED)
+ tcp_set_state(sk,TCP_FIN_WAIT1);
+ else
+ tcp_set_state(sk,TCP_FIN_WAIT2);
+ reset_timer(sk, TIME_CLOSE,4*sk->rto);
+ if(timeout)
+ tcp_time_wait(sk);
+
+ release_sock(sk);
+ return;
+ }
+
+ t1 =(struct tcphdr *)((char *)t1 +tmp);
+ buff->len += tmp;
+ buff->dev = dev;
+ memcpy(t1, th, sizeof(*t1));
+ t1->seq = ntohl(sk->write_seq);
+ sk->write_seq++;
+ buff->h.seq = sk->write_seq;
+ t1->ack = 1;
+
+ /*
+ * Ack everything immediately from now on.
+ */
+
+ sk->delay_acks = 0;
+ t1->ack_seq = ntohl(sk->acked_seq);
+ t1->window = ntohs(sk->window=tcp_select_window(sk));
+ t1->fin = 1;
+ t1->rst = 0;
+ t1->doff = sizeof(*t1)/4;
+ tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
+
+ tcp_statistics.TcpOutSegs++;
+
+ if (skb_peek(&sk->write_queue) == NULL)
+ {
+ sk->sent_seq = sk->write_seq;
+ prot->queue_xmit(sk, dev, buff, 0);
+ }
+ else
+ {
+ reset_timer(sk, TIME_WRITE, sk->rto);
+ if (buff->next != NULL)
+ {
+ printk("tcp_close: next != NULL\n");
+ skb_unlink(buff);
+ }
+ skb_queue_tail(&sk->write_queue, buff);
+ }
+
+ /*
+ * If established (normal close), enter FIN_WAIT1.
+ * If in CLOSE_WAIT, enter LAST_ACK
+ * If in CLOSING, remain in CLOSING
+ * otherwise enter FIN_WAIT2
+ */
+
+ if (sk->state == TCP_ESTABLISHED)
+ tcp_set_state(sk,TCP_FIN_WAIT1);
+ else if (sk->state == TCP_CLOSE_WAIT)
+ tcp_set_state(sk,TCP_LAST_ACK);
+ else if (sk->state != TCP_CLOSING)
+ tcp_set_state(sk,TCP_FIN_WAIT2);
+ }
+ release_sock(sk);
+}
+
+
+/*
+ * This routine takes stuff off of the write queue,
+ * and puts it in the xmit queue.
+ */
+static void
+tcp_write_xmit(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ /*
+ * The bytes will have to remain here. In time closedown will
+ * empty the write queue and all will be happy
+ */
+
+ if(sk->zapped)
+ return;
+
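+ /*
+ * Send each queued segment while: it fits inside the offered
+ * window, we are not in the middle of a retransmission run (or the
+ * segment has in fact already been acked), and the number of
+ * packets in flight stays below the congestion window.
+ */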
+ while((skb = skb_peek(&sk->write_queue)) != NULL &&
+ before(skb->h.seq, sk->window_seq + 1) &&
+ (sk->retransmits == 0 ||
+ sk->timeout != TIME_WRITE ||
+ before(skb->h.seq, sk->rcv_ack_seq + 1))
+ && sk->packets_out < sk->cong_window)
+ {
+ IS_SKB(skb);
+ skb_unlink(skb);
+ /* See if we really need to send the packet. */
+ if (before(skb->h.seq, sk->rcv_ack_seq +1))
+ {
+ sk->retransmits = 0;
+ kfree_skb(skb, FREE_WRITE);
+ if (!sk->dead)
+ sk->write_space(sk);
+ }
+ else
+ {
+ struct tcphdr *th;
+ struct iphdr *iph;
+ int size;
+/*
+ * put in the ack seq and window at this point rather than earlier,
+ * in order to keep them monotonic. We really want to avoid taking
+ * back window allocations. That's legal, but RFC1122 says it's frowned on.
+ * Ack and window will in general have changed since this packet was put
+ * on the write queue.
+ */
+ iph = (struct iphdr *)(skb->data +
+ skb->dev->hard_header_len);
+ th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
+ size = skb->len - (((unsigned char *) th) - skb->data);
+
+ th->ack_seq = ntohl(sk->acked_seq);
+ th->window = ntohs(tcp_select_window(sk));
+
+ tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
+
+ sk->sent_seq = skb->h.seq;
+ sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
+ }
+ }
+}
+
+
+/*
+ * This routine deals with incoming acks, but not outgoing ones.
+ */
+
+static int tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
+{
+ unsigned long ack;
+ int flag = 0;
+
+ /*
+ * 1 - there was data in the packet as well as the ack, or new data
+ * was sent, or we are in a shutdown state
+ * 2 - data from retransmit queue was acked and removed
+ * 4 - window shrunk or data from retransmit queue was acked and removed
+ */
+
+ if(sk->zapped)
+ return(1); /* Dead, can't ack any more so why bother */
+
+ ack = ntohl(th->ack_seq);
+ if (ntohs(th->window) > sk->max_window)
+ {
+ sk->max_window = ntohs(th->window);
+#ifdef CONFIG_INET_PCTCP
+ sk->mss = min(sk->max_window>>1, sk->mtu);
+#else
+ sk->mss = min(sk->max_window, sk->mtu);
+#endif
+ }
+
+ if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
+ sk->retransmits = 0;
+
+ if (after(ack, sk->sent_seq) || before(ack, sk->rcv_ack_seq))
+ {
+ if(sk->debug)
+ printk("Ack ignored %lu %lu\n",ack,sk->sent_seq);
+
+ /*
+ * Keepalive processing.
+ */
+
+ if (after(ack, sk->sent_seq) || (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT))
+ {
+ return(0);
+ }
+ if (sk->keepopen)
+ {
+ if(sk->timeout==TIME_KEEPOPEN)
+ reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
+ }
+ return(1);
+ }
+
+ if (len != th->doff*4)
+ flag |= 1;
+
+ /* See if our window has been shrunk. */
+
+ if (after(sk->window_seq, ack+ntohs(th->window)))
+ {
+ /*
+ * We may need to move packets from the send queue
+ * to the write queue, if the window has been shrunk on us.
+ * The RFC says you are not allowed to shrink your window
+ * like this, but if the other end does, you must be able
+ * to deal with it.
+ */
+ struct sk_buff *skb;
+ struct sk_buff *skb2;
+ struct sk_buff *wskb = NULL;
+
+ skb2 = sk->send_head;
+ sk->send_head = NULL;
+ sk->send_tail = NULL;
+
+ flag |= 4;
+
+ sk->window_seq = ack + ntohs(th->window);
+ cli();
+ while (skb2 != NULL)
+ {
+ skb = skb2;
+ skb2 = skb->link3;
+ skb->link3 = NULL;
+ if (after(skb->h.seq, sk->window_seq))
+ {
+ if (sk->packets_out > 0)
+ sk->packets_out--;
+ /* We may need to remove this from the dev send list. */
+ if (skb->next != NULL)
+ {
+ skb_unlink(skb);
+ }
+ /* Now add it to the write_queue. */
+ if (wskb == NULL)
+ skb_queue_head(&sk->write_queue,skb);
+ else
+ skb_append(wskb,skb);
+ wskb = skb;
+ }
+ else
+ {
+ if (sk->send_head == NULL)
+ {
+ sk->send_head = skb;
+ sk->send_tail = skb;
+ }
+ else
+ {
+ sk->send_tail->link3 = skb;
+ sk->send_tail = skb;
+ }
+ skb->link3 = NULL;
+ }
+ }
+ sti();
+ }
+
+ if (sk->send_tail == NULL || sk->send_head == NULL)
+ {
+ sk->send_head = NULL;
+ sk->send_tail = NULL;
+ sk->packets_out= 0;
+ }
+
+ sk->window_seq = ack + ntohs(th->window);
+
+ /* We don't want too many packets out there. */
+ if (sk->timeout == TIME_WRITE &&
+ sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq))
+ {
+/*
+ * This is Jacobson's slow start and congestion avoidance.
+ * SIGCOMM '88, p. 328. Because we keep cong_window in integral
+ * mss's, we can't do cwnd += 1 / cwnd. Instead, maintain a
+ * counter and increment it once every cwnd times. It's possible
+ * that this should be done only if sk->retransmits == 0. I'm
+ * interpreting "new data is acked" as including data that has
+ * been retransmitted but is just now being acked.
+ */
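+/*
+ * In unscaled terms: while cong_window < ssthresh (slow start) the
+ * window grows by one mss per ack, doubling roughly every round trip;
+ * above ssthresh (congestion avoidance) cong_count approximates
+ * cong_window += 1/cong_window per ack, i.e. about one mss per round
+ * trip.
+ */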
+ if (sk->cong_window < sk->ssthresh)
+ /*
+ * In "safe" area, increase
+ */
+ sk->cong_window++;
+ else
+ {
+ /*
+ * In dangerous area, increase slowly. In theory this is
+ * sk->cong_window += 1 / sk->cong_window
+ */
+ if (sk->cong_count >= sk->cong_window)
+ {
+ sk->cong_window++;
+ sk->cong_count = 0;
+ }
+ else
+ sk->cong_count++;
+ }
+ }
+
+ sk->rcv_ack_seq = ack;
+
+ /*
+ * if this ack opens up a zero window, clear backoff. It was
+ * being used to time the probes, and is probably far higher than
+ * it needs to be for normal retransmission.
+ */
+
+ if (sk->timeout == TIME_PROBE0)
+ {
+ if (skb_peek(&sk->write_queue) != NULL && /* should always be non-null */
+ ! before (sk->window_seq, sk->write_queue.next->h.seq))
+ {
+ sk->retransmits = 0;
+ sk->backoff = 0;
+ /*
+ * Recompute rto from rtt. this eliminates any backoff.
+ */
+
+ sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
+ if (sk->rto > 120*HZ)
+ sk->rto = 120*HZ;
+ if (sk->rto < 20) /* Was 1*HZ, then 1 - turns out we must allow about
+ .2 of a second because of BSD delayed acks - on a 100Mb/sec link
+ .2 of a second is going to need huge windows (SIGH) */
+ sk->rto = 20;
+ }
+ }
+
+ /*
+ * See if we can take anything off of the retransmit queue.
+ */
+
+ while(sk->send_head != NULL)
+ {
+ /* Check for a bug. */
+ if (sk->send_head->link3 &&
+ after(sk->send_head->h.seq, sk->send_head->link3->h.seq))
+ printk("INET: tcp.c: *** bug send_list out of order.\n");
+ if (before(sk->send_head->h.seq, ack+1))
+ {
+ struct sk_buff *oskb;
+ if (sk->retransmits)
+ {
+ /*
+ * We were retransmitting. Don't count this in the RTT estimate.
+ */
+ flag |= 2;
+
+ /*
+ * even though we've gotten an ack, we're still
+ * retransmitting as long as we're sending from
+ * the retransmit queue. Keeping retransmits non-zero
+ * prevents us from getting new data interspersed with
+ * retransmissions.
+ */
+
+ if (sk->send_head->link3)
+ sk->retransmits = 1;
+ else
+ sk->retransmits = 0;
+ }
+ /*
+ * Note that we only reset backoff and rto in the
+ * rtt recomputation code. And that doesn't happen
+ * if there were retransmissions in effect. So the
+ * first new packet after the retransmissions is
+ * sent with the backoff still in effect. Not until
+ * we get an ack from a non-retransmitted packet do
+ * we reset the backoff and rto. This allows us to deal
+ * with a situation where the network delay has increased
+ * suddenly. I.e. Karn's algorithm. (SIGCOMM '87, p5.)
+ */
+
+ /*
+ * We have one less packet out there.
+ */
+
+ if (sk->packets_out > 0)
+ sk->packets_out --;
+ /*
+ * Wake up the process, it can probably write more.
+ */
+ if (!sk->dead)
+ sk->write_space(sk);
+ oskb = sk->send_head;
+
+ if (!(flag&2))
+ {
+ long m;
+
+ /*
+ * The following amusing code comes from Jacobson's
+ * article in SIGCOMM '88. Note that rtt and mdev
+ * are scaled versions of rtt and mean deviation.
+ * This is designed to be as fast as possible
+ * m stands for "measurement".
+ */
+
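+ /*
+ * In unscaled terms, and ignoring integer rounding, this is the
+ * classic estimator:
+ * SRTT <- SRTT + (m - SRTT)/8
+ * MDEV <- MDEV + (|m - SRTT| - MDEV)/4
+ * RTO = SRTT + 2*MDEV
+ * (sk->rtt holds 8*SRTT and sk->mdev holds 4*MDEV), with the RTO
+ * clamped between 20 jiffies and 120 seconds.
+ */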
+ m = jiffies - oskb->when; /* RTT */
+ if(m<=0)
+ m=1; /* IS THIS RIGHT FOR <0 ??? */
+ m -= (sk->rtt >> 3); /* m is now error in rtt est */
+ sk->rtt += m; /* rtt = 7/8 rtt + 1/8 new */
+ if (m < 0)
+ m = -m; /* m is now abs(error) */
+ m -= (sk->mdev >> 2); /* similar update on mdev */
+ sk->mdev += m; /* mdev = 3/4 mdev + 1/4 new */
+
+ /*
+ * Now update timeout. Note that this removes any backoff.
+ */
+
+ sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
+ if (sk->rto > 120*HZ)
+ sk->rto = 120*HZ;
+ if (sk->rto < 20) /* Was 1*HZ - keep .2 as minimum cos of the BSD delayed acks */
+ sk->rto = 20;
+ sk->backoff = 0;
+ }
+ flag |= (2|4);
+ cli();
+ oskb = sk->send_head;
+ IS_SKB(oskb);
+ sk->send_head = oskb->link3;
+ if (sk->send_head == NULL)
+ {
+ sk->send_tail = NULL;
+ }
+
+ /*
+ * We may need to remove this from the dev send list.
+ */
+
+ if (oskb->next)
+ skb_unlink(oskb);
+ sti();
+ kfree_skb(oskb, FREE_WRITE); /* write. */
+ if (!sk->dead)
+ sk->write_space(sk);
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /*
+ * XXX someone ought to look at this too... at the moment, if skb_peek()
+ * returns non-NULL, we completely ignore the timer stuff in the else
+ * clause. We ought to organize the code so that else clause can
+ * (should) be executed regardless, possibly moving the PROBE timer
+ * reset over. The skb_peek() thing should only move stuff to the
+ * write queue, NOT also manage the timer functions.
+ */
+
+ /*
+ * Maybe we can take some stuff off of the write queue,
+ * and put it onto the xmit queue.
+ */
+ if (skb_peek(&sk->write_queue) != NULL)
+ {
+ if (after (sk->window_seq+1, sk->write_queue.next->h.seq) &&
+ (sk->retransmits == 0 ||
+ sk->timeout != TIME_WRITE ||
+ before(sk->write_queue.next->h.seq, sk->rcv_ack_seq + 1))
+ && sk->packets_out < sk->cong_window)
+ {
+ flag |= 1;
+ tcp_write_xmit(sk);
+ }
+ else if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
+ sk->send_head == NULL &&
+ sk->ack_backlog == 0 &&
+ sk->state != TCP_TIME_WAIT)
+ {
+ reset_timer(sk, TIME_PROBE0, sk->rto);
+ }
+ }
+ else
+ {
+ /*
+ * from TIME_WAIT we stay in TIME_WAIT as long as we rx packets
+ * from TCP_CLOSE we don't do anything
+ *
+ * from anything else, if there is write data (or fin) pending,
+ * we use a TIME_WRITE timeout, else if keepalive we reset to
+ * a KEEPALIVE timeout, else we delete the timer.
+ *
+ * We do not set flag for nominal write data, otherwise we may
+ * force a state where we start to write itsy bitsy tidbits
+ * of data.
+ */
+
+ switch(sk->state) {
+ case TCP_TIME_WAIT:
+ /*
+ * keep us in TIME_WAIT until we stop getting packets,
+ * reset the timeout.
+ */
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+ break;
+ case TCP_CLOSE:
+ /*
+ * don't touch the timer.
+ */
+ break;
+ default:
+ /*
+ * must check send_head, write_queue, and ack_backlog
+ * to determine which timeout to use.
+ */
+ if (sk->send_head || skb_peek(&sk->write_queue) != NULL || sk->ack_backlog) {
+ reset_timer(sk, TIME_WRITE, sk->rto);
+ } else if (sk->keepopen) {
+ reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
+ } else {
+ delete_timer(sk);
+ }
+ break;
+ }
+#ifdef NOTDEF
+ if (sk->send_head == NULL && sk->ack_backlog == 0 &&
+ sk->state != TCP_TIME_WAIT && !sk->keepopen)
+ {
+ if (!sk->dead)
+ sk->write_space(sk);
+ if (sk->keepopen) {
+ reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
+ } else {
+ delete_timer(sk);
+ }
+ }
+ else
+ {
+ if (sk->state != (unsigned char) sk->keepopen)
+ {
+ reset_timer(sk, TIME_WRITE, sk->rto);
+ }
+ if (sk->state == TCP_TIME_WAIT)
+ {
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+ }
+ }
+#endif
+ }
+
+ if (sk->packets_out == 0 && sk->partial != NULL &&
+ skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL)
+ {
+ flag |= 1;
+ tcp_send_partial(sk);
+ }
+
+ /*
+ * In the LAST_ACK case, the other end FIN'd us. We then FIN'd them, and
+ * we are now waiting for an acknowledge to our FIN. The other end is
+ * already in TIME_WAIT.
+ *
+ * Move to TCP_CLOSE on success.
+ */
+
+ if (sk->state == TCP_LAST_ACK)
+ {
+ if (!sk->dead)
+ sk->state_change(sk);
+ if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq)
+ {
+ flag |= 1;
+ tcp_time_wait(sk);
+ sk->shutdown = SHUTDOWN_MASK;
+ }
+ }
+
+ /*
+ * Incoming ACK to a FIN we sent in the case of our initiating the close.
+ *
+ * Move to FIN_WAIT2 to await a FIN from the other end. Set
+ * SEND_SHUTDOWN but not RCV_SHUTDOWN as data can still be coming in.
+ */
+
+ if (sk->state == TCP_FIN_WAIT1)
+ {
+
+ if (!sk->dead)
+ sk->state_change(sk);
+ if (sk->rcv_ack_seq == sk->write_seq)
+ {
+ flag |= 1;
+ sk->shutdown |= SEND_SHUTDOWN;
+ tcp_set_state(sk, TCP_FIN_WAIT2);
+ }
+ }
+
+ /*
+ * Incoming ACK to a FIN we sent in the case of a simultaneous close.
+ *
+ * Move to TIME_WAIT
+ */
+
+ if (sk->state == TCP_CLOSING)
+ {
+
+ if (!sk->dead)
+ sk->state_change(sk);
+ if (sk->rcv_ack_seq == sk->write_seq)
+ {
+ flag |= 1;
+ tcp_time_wait(sk);
+ }
+ }
+
+ /*
+ * I make no guarantees about the first clause in the following
+ * test, i.e. "(!flag) || (flag&4)". I'm not entirely sure under
+ * what conditions "!flag" would be true. However I think the rest
+ * of the conditions would prevent that from causing any
+ * unnecessary retransmission.
+ * Clearly if the first packet has expired it should be
+ * retransmitted. The other alternative, "flag&2 && retransmits", is
+ * harder to explain: You have to look carefully at how and when the
+ * timer is set and with what timeout. The most recent transmission always
+ * sets the timer. So in general if the most recent thing has timed
+ * out, everything before it has as well. So we want to go ahead and
+ * retransmit some more. If we didn't explicitly test for this
+ * condition with "flag&2 && retransmits", chances are "when + rto < jiffies"
+ * would not be true. If you look at the pattern of timing, you can
+ * show that rto is increased fast enough that the next packet would
+ * almost never be retransmitted immediately. Then you'd end up
+ * waiting for a timeout to send each packet on the retransmission
+ * queue. With my implementation of the Karn sampling algorithm,
+ * the timeout would double each time. The net result is that it would
+ * take a hideous amount of time to recover from a single dropped packet.
+ * It's possible that there should also be a test for TIME_WRITE, but
+ * I think as long as "send_head != NULL" and "retransmit" is on, we've
+ * got to be in real retransmission mode.
+ * Note that ip_do_retransmit is called with all==1. Setting cong_window
+ * back to 1 at the timeout will cause us to send 1, then 2, etc. packets.
+ * As long as no further losses occur, this seems reasonable.
+ */
+
+ if (((!flag) || (flag&4)) && sk->send_head != NULL &&
+ (((flag&2) && sk->retransmits) ||
+ (sk->send_head->when + sk->rto < jiffies)))
+ {
+ ip_do_retransmit(sk, 1);
+ reset_timer(sk, TIME_WRITE, sk->rto);
+ }
+
+ return(1);
+}
+
+
+/*
+ * This routine handles the data. If there is room in the buffer,
+ * it will have already been moved into it. If there is no
+ * room, then we will just have to discard the packet.
+ */
+
+static int tcp_data(struct sk_buff *skb, struct sock *sk,
+ unsigned long saddr, unsigned short len)
+{
+ struct sk_buff *skb1, *skb2;
+ struct tcphdr *th;
+ int dup_dumped=0;
+ unsigned long new_seq;
+ struct sk_buff *tail;
+ unsigned long shut_seq;
+
+ th = skb->h.th;
+ skb->len = len -(th->doff*4);
+
+ /* The bytes in the receive read/assembly queue have increased. Needed for the
+ low memory discard algorithm */
+
+ sk->bytes_rcv += skb->len;
+
+ if (skb->len == 0 && !th->fin && !th->urg && !th->psh)
+ {
+ /*
+ * Don't want to keep passing acks back and forth.
+ * (Someone sent us a dataless, boring frame.)
+ */
+ if (!th->ack)
+ tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
+ kfree_skb(skb, FREE_READ);
+ return(0);
+ }
+
+ /*
+ * We no longer have anyone receiving data on this connection.
+ */
+
+ if(sk->shutdown & RCV_SHUTDOWN)
+ {
+ new_seq= th->seq + skb->len + th->syn; /* Right edge of _data_ part of frame */
+
+ /*
+ * This is subtle and not nice. When we shut down we can
+ * have data in the queue and acked_seq therefore not
+ * pointing to the last byte that will be read. Thus
+ * the naive implementation:
+ * after(new_seq,sk->acked_seq+1)
+ * will cause bogus resets IFF a resend of a frame that has
+ * been queued but not yet read after a shutdown has been done.
+ * What we do now is a bit more complex but works as
+ * follows. If the queue is empty copied_seq+1 is right (+1 for FIN)
+ * if the queue has data the shutdown occurs at the right edge of
+ * the last packet queued +1
+ *
+ * We can't simply ack data beyond this point, as it has not
+ * been and will never be received by an application.
+ */
+ tail=skb_peek(&sk->receive_queue);
+ if(tail!=NULL)
+ {
+ tail=sk->receive_queue.prev;
+ shut_seq=tail->h.th->seq+tail->len+1;
+ }
+ else
+ shut_seq=sk->copied_seq+1;
+
+ if(after(new_seq,shut_seq))
+ {
+ sk->acked_seq = new_seq + th->fin;
+ if(sk->debug)
+ printk("Data arrived on %p after close [Data right edge %lX, Socket shut on %lX] %d\n",
+ sk, new_seq, shut_seq, sk->blog);
+ tcp_reset(sk->saddr, sk->daddr, skb->h.th,
+ sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
+ tcp_statistics.TcpEstabResets++;
+ tcp_set_state(sk,TCP_CLOSE);
+ sk->err = EPIPE;
+ sk->shutdown = SHUTDOWN_MASK;
+ kfree_skb(skb, FREE_READ);
+ if (!sk->dead)
+ sk->state_change(sk);
+ return(0);
+ }
+ }
+ /*
+ * Now we have to walk the chain, and figure out where this one
+ * goes into it. This is set up so that the last packet we received
+ * will be the first one we look at, that way if everything comes
+ * in order, there will be no performance loss, and if they come
+ * out of order we will be able to fit things in nicely.
+ */
+
+ /*
+ * This should start at the last one, and then go around forwards.
+ */
+
+ if (skb_peek(&sk->receive_queue) == NULL) /* Empty queue is easy case */
+ {
+ skb_queue_head(&sk->receive_queue,skb);
+ skb1= NULL;
+ }
+ else
+ {
+ for(skb1=sk->receive_queue.prev; ; skb1 = skb1->prev)
+ {
+ if(sk->debug)
+ {
+ printk("skb1=%p :", skb1);
+ printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
+ printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
+ printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
+ sk->acked_seq);
+ }
+
+ /*
+ * Optimisation: Duplicate frame or extension of previous frame from
+ * same sequence point (lost ack case).
+ * The frame contains duplicate data or replaces a previous frame;
+ * discard the previous frame (safe as sk->inuse is set) and put
+ * the new one in its place.
+ */
+
+ if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
+ {
+ skb_append(skb1,skb);
+ skb_unlink(skb1);
+ kfree_skb(skb1,FREE_READ);
+ dup_dumped=1;
+ skb1=NULL;
+ break;
+ }
+
+ /*
+ * Found where it fits
+ */
+
+ if (after(th->seq+1, skb1->h.th->seq))
+ {
+ skb_append(skb1,skb);
+ break;
+ }
+
+ /*
+ * See if we've hit the start. If so insert.
+ */
+ if (skb1 == skb_peek(&sk->receive_queue))
+ {
+ skb_queue_head(&sk->receive_queue, skb);
+ break;
+ }
+ }
+ }
+
+ /*
+ * Figure out what the ack value for this frame is
+ */
+
+ th->ack_seq = th->seq + skb->len;
+ if (th->syn)
+ th->ack_seq++;
+ if (th->fin)
+ th->ack_seq++;
+
+ if (before(sk->acked_seq, sk->copied_seq))
+ {
+ printk("*** tcp.c:tcp_data bug acked < copied\n");
+ sk->acked_seq = sk->copied_seq;
+ }
+
+ /*
+ * Now figure out if we can ack anything.
+ */
+
+ if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1))
+ {
+ if (before(th->seq, sk->acked_seq+1))
+ {
+ int newwindow;
+
+ if (after(th->ack_seq, sk->acked_seq))
+ {
+ newwindow = sk->window-(th->ack_seq - sk->acked_seq);
+ if (newwindow < 0)
+ newwindow = 0;
+ sk->window = newwindow;
+ sk->acked_seq = th->ack_seq;
+ }
+ skb->acked = 1;
+
+ /*
+ * When we ack the fin, we turn on the RCV_SHUTDOWN flag.
+ */
+
+ if (skb->h.th->fin)
+ {
+ if (!sk->dead)
+ sk->state_change(sk);
+ sk->shutdown |= RCV_SHUTDOWN;
+ }
+
+ for(skb2 = skb->next;
+ skb2 != (struct sk_buff *)&sk->receive_queue;
+ skb2 = skb2->next)
+ {
+ if (before(skb2->h.th->seq, sk->acked_seq+1))
+ {
+ if (after(skb2->h.th->ack_seq, sk->acked_seq))
+ {
+ newwindow = sk->window -
+ (skb2->h.th->ack_seq - sk->acked_seq);
+ if (newwindow < 0)
+ newwindow = 0;
+ sk->window = newwindow;
+ sk->acked_seq = skb2->h.th->ack_seq;
+ }
+ skb2->acked = 1;
+ /*
+ * When we ack the fin, we turn on
+ * the RCV_SHUTDOWN flag.
+ */
+ if (skb2->h.th->fin)
+ {
+ sk->shutdown |= RCV_SHUTDOWN;
+ if (!sk->dead)
+ sk->state_change(sk);
+ }
+
+ /*
+ * Force an immediate ack.
+ */
+
+ sk->ack_backlog = sk->max_ack_backlog;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /*
+ * This also takes care of updating the window.
+ * This if statement needs to be simplified.
+ */
+ if (!sk->delay_acks ||
+ sk->ack_backlog >= sk->max_ack_backlog ||
+ sk->bytes_rcv > sk->max_unacked || th->fin) {
+ /* tcp_send_ack(sk->sent_seq, sk->acked_seq,sk,th, saddr); */
+ }
+ else
+ {
+ sk->ack_backlog++;
+ if(sk->debug)
+ printk("Ack queued.\n");
+ reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
+ }
+ }
+ }
+
+ /*
+ * If we've missed a packet, send an ack.
+ * Also start a timer to send another.
+ */
+
+ if (!skb->acked)
+ {
+
+ /*
+ * This is important. If we don't have much room left,
+ * we need to throw out a few packets so we have a good
+ * window. Note that mtu is used, not mss, because mss is really
+ * for the send side. He could be sending us stuff as large as mtu.
+ */
+
+ while (sk->prot->rspace(sk) < sk->mtu)
+ {
+ skb1 = skb_peek(&sk->receive_queue);
+ if (skb1 == NULL)
+ {
+ printk("INET: tcp.c:tcp_data memory leak detected.\n");
+ break;
+ }
+
+ /*
+ * Don't throw out something that has been acked.
+ */
+
+ if (skb1->acked)
+ {
+ break;
+ }
+
+ skb_unlink(skb1);
+ kfree_skb(skb1, FREE_READ);
+ }
+ tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
+ sk->ack_backlog++;
+ reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
+ }
+ else
+ {
+ /* We missed a packet. Send an ack to try to resync things. */
+ tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
+ }
+
+ /*
+ * Now tell the user we may have some data.
+ */
+
+ if (!sk->dead)
+ {
+ if(sk->debug)
+ printk("Data wakeup.\n");
+ sk->data_ready(sk,0);
+ }
+ return(0);
+}
+
+
+static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
+{
+ unsigned long ptr = ntohs(th->urg_ptr);
+
+ if (ptr)
+ ptr--;
+ ptr += th->seq;
+
+ /* ignore urgent data that we've already seen and read */
+ if (after(sk->copied_seq+1, ptr))
+ return;
+
+ /* do we already have a newer (or duplicate) urgent pointer? */
+ if (sk->urg_data && !after(ptr, sk->urg_seq))
+ return;
+
+ /* tell the world about our new urgent pointer */
+ if (sk->proc != 0) {
+ if (sk->proc > 0) {
+ kill_proc(sk->proc, SIGURG, 1);
+ } else {
+ kill_pg(-sk->proc, SIGURG, 1);
+ }
+ }
+ sk->urg_data = URG_NOTYET;
+ sk->urg_seq = ptr;
+}
+
+static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
+ unsigned long saddr, unsigned long len)
+{
+ unsigned long ptr;
+
+ /* check if we get a new urgent pointer */
+ if (th->urg)
+ tcp_check_urg(sk,th);
+
+ /* do we wait for any urgent data? */
+ if (sk->urg_data != URG_NOTYET)
+ return 0;
+
+ /* is the urgent pointer pointing into this packet? */
+ ptr = sk->urg_seq - th->seq + th->doff*4;
+ if (ptr >= len)
+ return 0;
+
+ /* ok, got the correct packet, update info */
+ sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
+ if (!sk->dead)
+ sk->data_ready(sk,0);
+ return 0;
+}
+
+
+/*
+ * This deals with incoming fins. 'Linus at 9 O'clock' 8-)
+ *
+ * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
+ * (and thence onto LAST-ACK and finally, CLOSE, we never enter
+ * TIME-WAIT)
+ *
+ * If we are in FINWAIT-1, a received FIN indicates simultaneous
+ * close and we go into CLOSING (and later onto TIME-WAIT)
+ *
+ * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
+ *
+ */
+static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th,
+ unsigned long saddr, struct device *dev)
+{
+ sk->fin_seq = th->seq + skb->len + th->syn + th->fin;
+
+ if (!sk->dead)
+ {
+ sk->state_change(sk);
+ }
+
+ switch(sk->state)
+ {
+ case TCP_SYN_RECV:
+ case TCP_SYN_SENT:
+ case TCP_ESTABLISHED:
+ /*
+ * move to CLOSE_WAIT, tcp_data() already handled
+ * sending the ack.
+ */
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEOUT_LEN);
+ tcp_set_state(sk,TCP_CLOSE_WAIT);
+ if (th->rst)
+ sk->shutdown = SHUTDOWN_MASK;
+ break;
+
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSING:
+ /*
+ * received a retransmission of the FIN, do
+ * nothing.
+ */
+ break;
+ case TCP_TIME_WAIT:
+ /*
+ * received a retransmission of the FIN,
+ * restart the TIME_WAIT timer.
+ */
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+ return(0);
+ case TCP_FIN_WAIT1:
+ /*
+ * This case occurs when a simultaneous close
+ * happens, we must ack the received FIN and
+ * enter the CLOSING state.
+ *
+ * XXX timeout not set properly
+ */
+
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+ tcp_set_state(sk,TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
+ /*
+ * received a FIN -- send ACK and enter TIME_WAIT
+ */
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+ sk->shutdown|=SHUTDOWN_MASK;
+ tcp_set_state(sk,TCP_TIME_WAIT);
+ break;
+ case TCP_CLOSE:
+ /*
+ * already in CLOSE
+ */
+ break;
+ default:
+ tcp_set_state(sk,TCP_LAST_ACK);
+
+ /* Start the timers. */
+ reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+ return(0);
+ }
+ sk->ack_backlog++;
+
+ return(0);
+}
+
+
+/* This will accept the next outstanding connection. */
+static struct sock *
+tcp_accept(struct sock *sk, int flags)
+{
+ struct sock *newsk;
+ struct sk_buff *skb;
+
+ /*
+ * We need to make sure that this socket is listening,
+ * and that it has something pending.
+ */
+
+ if (sk->state != TCP_LISTEN)
+ {
+ sk->err = EINVAL;
+ return(NULL);
+ }
+
+ /* Avoid the race. */
+ cli();
+ sk->inuse = 1;
+
+ while((skb = tcp_dequeue_established(sk)) == NULL)
+ {
+ if (flags & O_NONBLOCK)
+ {
+ sti();
+ release_sock(sk);
+ sk->err = EAGAIN;
+ return(NULL);
+ }
+
+ release_sock(sk);
+ interruptible_sleep_on(sk->sleep);
+ if (current->signal & ~current->blocked)
+ {
+ sti();
+ sk->err = ERESTARTSYS;
+ return(NULL);
+ }
+ sk->inuse = 1;
+ }
+ sti();
+
+ /*
+ * Now all we need to do is return skb->sk.
+ */
+
+ newsk = skb->sk;
+
+ kfree_skb(skb, FREE_READ);
+ sk->ack_backlog--;
+ release_sock(sk);
+ return(newsk);
+}
+
+
+/*
+ * This will initiate an outgoing connection.
+ */
+
+static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
+{
+ struct sk_buff *buff;
+ struct device *dev=NULL;
+ unsigned char *ptr;
+ int tmp;
+ int atype;
+ struct tcphdr *t1;
+ struct rtable *rt;
+
+ if (sk->state != TCP_CLOSE)
+ return(-EISCONN);
+
+ if (addr_len < 8)
+ return(-EINVAL);
+
+ if (usin->sin_family && usin->sin_family != AF_INET)
+ return(-EAFNOSUPPORT);
+
+ /*
+ * connect() to INADDR_ANY means loopback (BSD'ism).
+ */
+
+ if(usin->sin_addr.s_addr==INADDR_ANY)
+ usin->sin_addr.s_addr=ip_my_addr();
+
+ /*
+ * Don't want a TCP connection going to a broadcast address
+ */
+
+ if ((atype=ip_chk_addr(usin->sin_addr.s_addr)) == IS_BROADCAST || atype==IS_MULTICAST)
+ return -ENETUNREACH;
+
+ sk->inuse = 1;
+ sk->daddr = usin->sin_addr.s_addr;
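+	/* Derive the initial send sequence number from the clock */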
+ sk->write_seq = jiffies * SEQ_TICK - seq_offset;
+ sk->window_seq = sk->write_seq;
+ sk->rcv_ack_seq = sk->write_seq -1;
+ sk->err = 0;
+ sk->dummy_th.dest = usin->sin_port;
+ release_sock(sk);
+
+ buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
+ if (buff == NULL)
+ {
+ return(-ENOMEM);
+ }
+ sk->inuse = 1;
+ buff->len = 24;
+ buff->sk = sk;
+ buff->free = 1;
+ buff->localroute = sk->localroute;
+
+ t1 = (struct tcphdr *) buff->data;
+
+ /*
+ * Put in the IP header and routing stuff.
+ */
+
+ rt=ip_rt_route(sk->daddr, NULL, NULL);
+
+
+ /*
+ * We need to build the routing stuff from the things saved in skb.
+ */
+
+ tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
+ IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0)
+ {
+ sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
+ release_sock(sk);
+ return(-ENETUNREACH);
+ }
+
+ buff->len += tmp;
+ t1 = (struct tcphdr *)((char *)t1 +tmp);
+
+ memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
+ t1->seq = ntohl(sk->write_seq++);
+ sk->sent_seq = sk->write_seq;
+ buff->h.seq = sk->write_seq;
+ t1->ack = 0;
+ t1->window = 2;
+ t1->res1=0;
+ t1->res2=0;
+ t1->rst = 0;
+ t1->urg = 0;
+ t1->psh = 0;
+ t1->syn = 1;
+ t1->urg_ptr = 0;
+ t1->doff = 6;
+ /* use 512 or whatever user asked for */
+
+ if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
+ sk->window_clamp=rt->rt_window;
+ else
+ sk->window_clamp=0;
+
+ if (sk->user_mss)
+ sk->mtu = sk->user_mss;
+ else if(rt!=NULL && (rt->rt_flags&RTF_MTU))
+ sk->mtu = rt->rt_mss;
+ else
+ {
+#ifdef CONFIG_INET_SNARL
+ if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
+#else
+ if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
+#endif
+ sk->mtu = 576 - HEADER_SIZE;
+ else
+ sk->mtu = MAX_WINDOW;
+ }
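+	/*
+	 * If the destination is not on the directly attached network (judged
+	 * by the classful default mask under CONFIG_INET_SNARL, or by the
+	 * interface mask otherwise), assume an intervening gateway and fall
+	 * back to the conservative 576 byte IP default, less our headers.
+	 */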
+ /*
+ * but not bigger than device MTU
+ */
+
+ if(sk->mtu <32)
+ sk->mtu = 32; /* Sanity limit */
+
+ sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);
+
+ /*
+ * Put in the TCP options to say MTU.
+ */
+
+ ptr = (unsigned char *)(t1+1);
+ ptr[0] = 2;
+ ptr[1] = 4;
+ ptr[2] = (sk->mtu) >> 8;
+ ptr[3] = (sk->mtu) & 0xff;
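+	/*
+	 * That is the MSS option: kind 2, length 4 and the value itself in
+	 * network byte order. It makes the TCP header 24 bytes long, hence
+	 * the doff of 6 above and the extra 4 bytes checksummed below.
+	 */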
+ tcp_send_check(t1, sk->saddr, sk->daddr,
+ sizeof(struct tcphdr) + 4, sk);
+
+ /*
+ * This must go first otherwise a really quick response will get reset.
+ */
+
+ tcp_set_state(sk,TCP_SYN_SENT);
+ sk->rto = TCP_TIMEOUT_INIT;
+ reset_timer(sk, TIME_WRITE, sk->rto); /* Timer for repeating the SYN until an answer */
+ sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;
+
+ sk->prot->queue_xmit(sk, dev, buff, 0);
+ tcp_statistics.TcpActiveOpens++;
+ tcp_statistics.TcpOutSegs++;
+
+ release_sock(sk);
+ return(0);
+}
+
+
+/* This function checks to see if the tcp header is actually acceptable. */
+static int
+tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
+ struct options *opt, unsigned long saddr, struct device *dev)
+{
+ unsigned long next_seq;
+
+ next_seq = len - 4*th->doff;
+ if (th->fin)
+ next_seq++;
+ /* if we have a zero window, we can't have any data in the packet.. */
+ if (next_seq && !sk->window)
+ goto ignore_it;
+ next_seq += th->seq;
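+	/* next_seq is now the sequence number just past this segment's data */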
+
+ /*
+ * This isn't quite right. sk->acked_seq could be more recent
+ * than sk->window. This is however close enough. We will accept
+ * slightly more packets than we should, but it should not cause
+ * problems unless someone is trying to forge packets.
+ */
+
+ /* have we already seen all of this packet? */
+ if (!after(next_seq+1, sk->acked_seq))
+ goto ignore_it;
+ /* or does it start beyond the window? */
+ if (!before(th->seq, sk->acked_seq + sk->window + 1))
+ goto ignore_it;
+
+ /* ok, at least part of this packet would seem interesting.. */
+ return 1;
+
+ignore_it:
+ if (th->rst)
+ return 0;
+
+ /*
+ * Send a reset if we get something not ours and we are
+ * unsynchronized. Note: We don't do anything to our end. We
+ * are just killing the bogus remote connection then we will
+ * connect again and it will work (with luck).
+ */
+
+ if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
+ tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
+ return 1;
+ }
+
+ /* Try to resync things. */
+ tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
+ return 0;
+}
+
+
+#ifdef TCP_FASTPATH
+/*
+ *	Is the end of the queue clear of fragments as yet unmerged into the data stream?
+ * Yes if
+ * a) The queue is empty
+ * b) The last frame on the queue has the acked flag set
+ */
+
+static inline int tcp_clean_end(struct sock *sk)
+{
+ struct sk_buff *skb=skb_peek(&sk->receive_queue);
+ if(skb==NULL || sk->receive_queue.prev->acked)
+		return 1;
+	return 0;
+}
+
+#endif
+
+int
+tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
+ unsigned long daddr, unsigned short len,
+ unsigned long saddr, int redo, struct inet_protocol * protocol)
+{
+ struct tcphdr *th;
+ struct sock *sk;
+
+ if (!skb)
+ {
+ return(0);
+ }
+
+ if (!dev)
+ {
+ return(0);
+ }
+
+ tcp_statistics.TcpInSegs++;
+
+ if(skb->pkt_type!=PACKET_HOST)
+ {
+ kfree_skb(skb,FREE_READ);
+ return(0);
+ }
+
+ th = skb->h.th;
+
+ /*
+ * Find the socket.
+ */
+
+ sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
+
+ /*
+	 * If this socket has got a reset it's to all intents and purposes
+	 * really dead.
+ */
+
+ if (sk!=NULL && sk->zapped)
+ sk=NULL;
+
+ if (!redo)
+ {
+ if (tcp_check(th, len, saddr, daddr ))
+ {
+ skb->sk = NULL;
+ kfree_skb(skb,FREE_READ);
+ /*
+ * We don't release the socket because it was
+ * never marked in use.
+ */
+ return(0);
+ }
+ th->seq = ntohl(th->seq);
+
+ /* See if we know about the socket. */
+ if (sk == NULL)
+ {
+ if (!th->rst)
+ tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ return(0);
+ }
+
+ skb->len = len;
+ skb->sk = sk;
+ skb->acked = 0;
+ skb->used = 0;
+ skb->free = 0;
+ skb->saddr = daddr;
+ skb->daddr = saddr;
+
+ /* We may need to add it to the backlog here. */
+ cli();
+ if (sk->inuse)
+ {
+ skb_queue_tail(&sk->back_log, skb);
+ sti();
+ return(0);
+ }
+ sk->inuse = 1;
+ sti();
+ }
+ else
+ {
+ if (!sk)
+ {
+ return(0);
+ }
+ }
+
+
+ if (!sk->prot)
+ {
+ return(0);
+ }
+
+
+ /*
+ * Charge the memory to the socket.
+ */
+
+ if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf)
+ {
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ sk->rmem_alloc += skb->mem_len;
+
+#ifdef TCP_FASTPATH
+/*
+ * Incoming data stream fastpath.
+ *
+ * We try to optimise two things.
+ * 1) Spot general data arriving without funny options and skip extra checks and the switch.
+ * 2) Spot the common case in raw data receive streams of a packet that has no funny options,
+ * fits exactly on the end of the current queue and may or may not have the ack bit set.
+ *
+ * Case two especially is done inline in this routine so there are no long jumps causing heavy
+ * cache thrashing, no function call overhead (except for the ack sending if needed) and for
+ * speed although further optimizing here is possible.
+ */
+
+ /* I'm trusting gcc to optimise this sensibly... might need judicious application of a software mallet */
+ if(!(sk->shutdown & RCV_SHUTDOWN) && sk->state==TCP_ESTABLISHED && !th->urg && !th->syn && !th->fin && !th->rst)
+ {
+ /* Packets in order. Fits window */
+ if(th->seq == sk->acked_seq+1 && sk->window && tcp_clean_end(sk))
+ {
+ /* Ack is harder */
+ if(th->ack && !tcp_ack(sk, th, saddr, len))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return 0;
+ }
+ /*
+ * Set up variables
+ */
+ skb->len -= (th->doff *4);
+ sk->bytes_rcv += skb->len;
+ tcp_rx_hit2++;
+ if(skb->len)
+ {
+ skb_queue_tail(&sk->receive_queue,skb); /* We already know where to put it */
+ if(sk->window >= skb->len)
+ sk->window-=skb->len; /* We know its effect on the window */
+ else
+ sk->window=0;
+ sk->acked_seq = th->seq+skb->len; /* Easy */
+ skb->acked=1; /* Guaranteed true */
+ if(!sk->delay_acks || sk->ack_backlog >= sk->max_ack_backlog ||
+ sk->bytes_rcv > sk->max_unacked)
+ {
+ tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th , saddr);
+ }
+ else
+ {
+ sk->ack_backlog++;
+ reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
+ }
+ if(!sk->dead)
+ sk->data_ready(sk,0);
+ release_sock(sk);
+ return 0;
+ }
+ }
+ /*
+ * More generic case of arriving data stream in ESTABLISHED
+ */
+ tcp_rx_hit1++;
+ if(!tcp_sequence(sk, th, len, opt, saddr, dev))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return 0;
+ }
+ if(th->ack && !tcp_ack(sk, th, saddr, len))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return 0;
+ }
+ if(tcp_data(skb, sk, saddr, len))
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return 0;
+ }
+ tcp_rx_miss++;
+#endif
+
+ /*
+ * Now deal with all cases.
+ */
+
+ switch(sk->state)
+ {
+
+ /*
+ * This should close the system down if it's waiting
+ * for an ack that is never going to be sent.
+ */
+ case TCP_LAST_ACK:
+ if (th->rst)
+ {
+ sk->zapped=1;
+ sk->err = ECONNRESET;
+ tcp_set_state(sk,TCP_CLOSE);
+ sk->shutdown = SHUTDOWN_MASK;
+ if (!sk->dead)
+ {
+ sk->state_change(sk);
+ }
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ case TCP_ESTABLISHED:
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSING:
+ case TCP_FIN_WAIT1:
+ case TCP_FIN_WAIT2:
+ case TCP_TIME_WAIT:
+
+ /*
+ * is it a good packet?
+ */
+
+ if (!tcp_sequence(sk, th, len, opt, saddr,dev))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ if (th->rst)
+ {
+ tcp_statistics.TcpEstabResets++;
+ sk->zapped=1;
+ /* This means the thing should really be closed. */
+ sk->err = ECONNRESET;
+ if (sk->state == TCP_CLOSE_WAIT)
+ {
+ sk->err = EPIPE;
+ }
+
+ /*
+ * A reset with a fin just means that
+ * the data was not all read.
+ */
+ tcp_set_state(sk,TCP_CLOSE);
+ sk->shutdown = SHUTDOWN_MASK;
+ if (!sk->dead)
+ {
+ sk->state_change(sk);
+ }
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+ if (th->syn)
+ {
+ long seq=sk->write_seq;
+ int st=sk->state;
+ tcp_statistics.TcpEstabResets++;
+ sk->err = ECONNRESET;
+ tcp_set_state(sk,TCP_CLOSE);
+ sk->shutdown = SHUTDOWN_MASK;
+ if(sk->debug)
+ printk("Socket %p reset by SYN while established.\n", sk);
+ if (!sk->dead) {
+ sk->state_change(sk);
+ }
+ /*
+ * The BSD port reuse protocol violation.
+ * I do sometimes wonder how the *bsd people
+ * have the nerve to talk about 'standards'.
+ *
+ * If seq > last used on connection then
+ * open a new connection and use 128000+seq of
+ * old connection.
+ *
+ */
+
+ if(st==TCP_TIME_WAIT && th->seq > sk->acked_seq && sk->dead)
+ {
+ struct sock *psk=sk;
+ /*
+ * Find the listening socket.
+ */
+ sk=get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
+ if(sk && sk->state==TCP_LISTEN)
+ {
+ sk->inuse=1;
+ tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000);
+ release_sock(psk);
+ /* Fall through in case people are
+ also using the piggy backed SYN + data
+ protocol violation */
+ }
+ else
+ {
+ tcp_reset(daddr, saddr, th, psk->prot, opt,dev, psk->ip_tos,psk->ip_ttl);
+ release_sock(psk);
+ kfree_skb(skb, FREE_READ);
+ return 0;
+ }
+ }
+ else
+ {
+ tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+ }
+ if (th->ack && !tcp_ack(sk, th, saddr, len)) {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ if (tcp_urg(sk, th, saddr, len)) {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+
+ if (tcp_data(skb, sk, saddr, len)) {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ if (th->fin && tcp_fin(skb, sk, th, saddr, dev)) {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ release_sock(sk);
+ return(0);
+
+
+ case TCP_CLOSE:
+ if (sk->dead || sk->daddr) {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ if (!th->rst) {
+ if (!th->ack)
+ th->ack_seq = 0;
+ if(sk->debug) printk("Reset on closed socket %d.\n",sk->blog);
+ tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
+ }
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+
+ case TCP_LISTEN:
+ if (th->rst) {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+ if (th->ack) {
+ if(sk->debug) printk("Reset on listening socket %d.\n",sk->blog);
+ tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ if (th->syn)
+ {
+ /*
+ * Now we just put the whole thing including
+ * the header and saddr, and protocol pointer
+ * into the buffer. We can't respond until the
+ * user tells us to accept the connection.
+ */
+ tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq());
+ release_sock(sk);
+ return(0);
+ }
+
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+
+ case TCP_SYN_RECV:
+ if (th->syn) {
+ /* Probably a retransmitted syn */
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+
+ default:
+ if (!tcp_sequence(sk, th, len, opt, saddr,dev))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ case TCP_SYN_SENT:
+ if (th->rst)
+ {
+ tcp_statistics.TcpAttemptFails++;
+ sk->err = ECONNREFUSED;
+ tcp_set_state(sk,TCP_CLOSE);
+ sk->shutdown = SHUTDOWN_MASK;
+ sk->zapped = 1;
+ if (!sk->dead)
+ {
+ sk->state_change(sk);
+ }
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+ if (!th->ack)
+ {
+ if (th->syn)
+ {
+ /* Crossed SYN's are fine - but talking to
+ yourself is right out... */
+ if(sk->saddr==saddr && sk->daddr==daddr &&
+ sk->dummy_th.source==th->source &&
+ sk->dummy_th.dest==th->dest)
+ {
+ tcp_statistics.TcpAttemptFails++;
+ sk->err = ECONNREFUSED;
+ tcp_set_state(sk,TCP_CLOSE);
+ sk->shutdown = SHUTDOWN_MASK;
+ sk->zapped = 1;
+ if (!sk->dead)
+ {
+ sk->state_change(sk);
+ }
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+ tcp_set_state(sk,TCP_SYN_RECV);
+ }
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ switch(sk->state)
+ {
+ case TCP_SYN_SENT:
+ if (!tcp_ack(sk, th, saddr, len))
+ {
+ tcp_statistics.TcpAttemptFails++;
+ tcp_reset(daddr, saddr, th,
+ sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ /*
+ * If the syn bit is also set, switch to
+ * tcp_syn_recv, and then to established.
+ */
+ if (!th->syn)
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ /* Ack the syn and fall through. */
+ sk->acked_seq = th->seq+1;
+ sk->fin_seq = th->seq;
+ tcp_send_ack(sk->sent_seq, th->seq+1,
+ sk, th, sk->daddr);
+
+ case TCP_SYN_RECV:
+ if (!tcp_ack(sk, th, saddr, len))
+ {
+ tcp_statistics.TcpAttemptFails++;
+ tcp_reset(daddr, saddr, th,
+ sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ tcp_set_state(sk,TCP_ESTABLISHED);
+
+ /*
+ * Now we need to finish filling out
+ * some of the tcp header.
+ *
+ * We need to check for mtu info.
+ */
+ tcp_options(sk, th);
+ sk->dummy_th.dest = th->source;
+ sk->copied_seq = sk->acked_seq-1;
+ if (!sk->dead)
+ {
+ sk->state_change(sk);
+ }
+
+ /*
+ * We've already processed his first
+ * ack. In just about all cases that
+ * will have set max_window. This is
+ * to protect us against the possibility
+ * that the initial window he sent was 0.
+ * This must occur after tcp_options, which
+ * sets sk->mtu.
+ */
+ if (sk->max_window == 0)
+ {
+ sk->max_window = 32;
+ sk->mss = min(sk->max_window, sk->mtu);
+ }
+
+ /*
+ * Now process the rest like we were
+ * already in the established state.
+ */
+ if (th->urg)
+ {
+ if (tcp_urg(sk, th, saddr, len))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+ }
+ if (tcp_data(skb, sk, saddr, len))
+ kfree_skb(skb, FREE_READ);
+
+ if (th->fin)
+ tcp_fin(skb, sk, th, saddr, dev);
+ release_sock(sk);
+ return(0);
+ }
+
+ if (th->urg)
+ {
+ if (tcp_urg(sk, th, saddr, len))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+ }
+ if (tcp_data(skb, sk, saddr, len))
+ {
+ kfree_skb(skb, FREE_READ);
+ release_sock(sk);
+ return(0);
+ }
+
+ if (!th->fin)
+ {
+ release_sock(sk);
+ return(0);
+ }
+ tcp_fin(skb, sk, th, saddr, dev);
+ release_sock(sk);
+ return(0);
+ }
+}
+
+
+/*
+ * This routine sends a packet with an out of date sequence
+ * number. It assumes the other end will try to ack it.
+ */
+
+static void tcp_write_wakeup(struct sock *sk)
+{
+ struct sk_buff *buff;
+ struct tcphdr *t1;
+ struct device *dev=NULL;
+ int tmp;
+
+ if (sk->zapped)
+ return; /* After a valid reset we can send no more */
+
+ /*
+ * Write data can still be transmitted/retransmitted in the
+ * following states. If any other state is encountered, return.
+ */
+
+ if (sk->state != TCP_ESTABLISHED &&
+ sk->state != TCP_CLOSE_WAIT &&
+ sk->state != TCP_FIN_WAIT1 &&
+ sk->state != TCP_LAST_ACK &&
+ sk->state != TCP_CLOSING
+ ) {
+ return;
+ }
+
+ buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
+ if (buff == NULL)
+ return;
+
+ buff->len = sizeof(struct tcphdr);
+ buff->free = 1;
+ buff->sk = sk;
+ buff->localroute = sk->localroute;
+
+ t1 = (struct tcphdr *) buff->data;
+
+ /* Put in the IP header and routing stuff. */
+ tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
+ IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
+ if (tmp < 0)
+ {
+ sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
+ return;
+ }
+
+ buff->len += tmp;
+ t1 = (struct tcphdr *)((char *)t1 +tmp);
+
+ memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
+
+ /*
+ * Use a previous sequence.
+ * This should cause the other end to send an ack.
+ */
+ t1->seq = htonl(sk->sent_seq-1);
+ t1->ack = 1;
+ t1->res1= 0;
+ t1->res2= 0;
+ t1->rst = 0;
+ t1->urg = 0;
+ t1->psh = 0;
+ t1->fin = 0;
+ t1->syn = 0;
+ t1->ack_seq = ntohl(sk->acked_seq);
+ t1->window = ntohs(tcp_select_window(sk));
+ t1->doff = sizeof(*t1)/4;
+ tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
+
+ /* Send it and free it.
+ * This will prevent the timer from automatically being restarted.
+ */
+ sk->prot->queue_xmit(sk, dev, buff, 1);
+ tcp_statistics.TcpOutSegs++;
+}
+
+void
+tcp_send_probe0(struct sock *sk)
+{
+ if (sk->zapped)
+ return; /* After a valid reset we can send no more */
+
+ tcp_write_wakeup(sk);
+
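+	/*
+	 * Exponential backoff: each unanswered probe doubles the timeout,
+	 * capped at 120 seconds.
+	 */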
+ sk->backoff++;
+ sk->rto = min(sk->rto << 1, 120*HZ);
+ reset_timer (sk, TIME_PROBE0, sk->rto);
+ sk->retransmits++;
+ sk->prot->retransmits ++;
+}
+
+/*
+ * Socket option code for TCP.
+ */
+
+int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
+{
+ int val,err;
+
+ if(level!=SOL_TCP)
+ return ip_setsockopt(sk,level,optname,optval,optlen);
+
+ if (optval == NULL)
+ return(-EINVAL);
+
+ err=verify_area(VERIFY_READ, optval, sizeof(int));
+ if(err)
+ return err;
+
+ val = get_fs_long((unsigned long *)optval);
+
+ switch(optname)
+ {
+ case TCP_MAXSEG:
+/*
+ * Values greater than the interface MTU won't take effect. However,
+ * at the point when this call is made we typically don't yet know
+ * which interface is going to be used.
+ */
+ if(val<1||val>MAX_WINDOW)
+ return -EINVAL;
+ sk->user_mss=val;
+ return 0;
+ case TCP_NODELAY:
+ sk->nonagle=(val==0)?0:1;
+ return 0;
+ default:
+ return(-ENOPROTOOPT);
+ }
+}
+
+int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
+{
+ int val,err;
+
+ if(level!=SOL_TCP)
+ return ip_getsockopt(sk,level,optname,optval,optlen);
+
+ switch(optname)
+ {
+ case TCP_MAXSEG:
+ val=sk->user_mss;
+ break;
+ case TCP_NODELAY:
+ val=sk->nonagle;
+ break;
+ default:
+ return(-ENOPROTOOPT);
+ }
+ err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(sizeof(int),(unsigned long *) optlen);
+
+ err=verify_area(VERIFY_WRITE, optval, sizeof(int));
+ if(err)
+ return err;
+ put_fs_long(val,(unsigned long *)optval);
+
+ return(0);
+}
+
+
+struct proto tcp_prot = {
+ sock_wmalloc,
+ sock_rmalloc,
+ sock_wfree,
+ sock_rfree,
+ sock_rspace,
+ sock_wspace,
+ tcp_close,
+ tcp_read,
+ tcp_write,
+ tcp_sendto,
+ tcp_recvfrom,
+ ip_build_header,
+ tcp_connect,
+ tcp_accept,
+ ip_queue_xmit,
+ tcp_retransmit,
+ tcp_write_wakeup,
+ tcp_read_wakeup,
+ tcp_rcv,
+ tcp_select,
+ tcp_ioctl,
+ NULL,
+ tcp_shutdown,
+ tcp_setsockopt,
+ tcp_getsockopt,
+ 128,
+ 0,
+ {NULL,},
+ "TCP"
+};
diff --git a/net/inet/tcp.h b/net/inet/tcp.h
new file mode 100644
index 000000000..2dcb22c82
--- /dev/null
+++ b/net/inet/tcp.h
@@ -0,0 +1,134 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP module.
+ *
+ * Version: @(#)tcp.h 1.0.5 05/23/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _TCP_H
+#define _TCP_H
+
+#include <linux/tcp.h>
+
+#define MAX_SYN_SIZE	(44 + MAX_HEADER)
+#define MAX_FIN_SIZE	(40 + MAX_HEADER)
+#define MAX_ACK_SIZE	(40 + MAX_HEADER)
+#define MAX_RESET_SIZE	(40 + MAX_HEADER)
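+/*
+ * The 44 above is 20 bytes of IP header, 20 of TCP header and the 4 byte
+ * MSS option sent on a SYN; the other frames carry no TCP options, hence
+ * 40 bytes, plus MAX_HEADER for the lower layer headers.
+ */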
+#define MAX_WINDOW 8192
+#define MIN_WINDOW 2048
+#define MAX_ACK_BACKLOG 2
+#define MIN_WRITE_SPACE 2048
+#define TCP_WINDOW_DIFF 2048
+
+/* urg_data states */
+#define URG_VALID 0x0100
+#define URG_NOTYET 0x0200
+#define URG_READ 0x0400
+
+#define TCP_RETR1 7 /*
+ * This is how many retries it does before it
+ * tries to figure out if the gateway is
+ * down.
+ */
+
+#define TCP_RETR2 15 /*
+ * This should take at least
+ * 90 minutes to time out.
+ */
+
+#define TCP_TIMEOUT_LEN (15*60*HZ) /* should be about 15 mins */
+#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully
+ * close the socket, about 60 seconds */
+#define TCP_ACK_TIME (3*HZ) /* time to delay before sending an ACK */
+#define TCP_DONE_TIME 250 /* maximum time to wait before actually
+ * destroying a socket */
+#define TCP_WRITE_TIME 3000 /* initial time to wait for an ACK,
+ * after last transmit */
+#define TCP_TIMEOUT_INIT (3*HZ) /* RFC 1122 initial timeout value */
+#define TCP_SYN_RETRIES 5 /* number of times to retry opening a
+ * connection */
+#define TCP_PROBEWAIT_LEN 100 /* time to wait between probes when
+ * I've got something to write and
+ * there is no window */
+
+#define TCP_NO_CHECK 0 /* turn to one if you want the default
+ * to be no checksum */
+
+
+/*
+ * TCP option
+ */
+
+#define TCPOPT_NOP 1
+#define TCPOPT_EOL 0
+#define TCPOPT_MSS 2
+
+/*
+ * The next routines deal with comparing 32 bit unsigned ints
+ * and worry about wraparound (automatic with unsigned arithmetic).
+ */
+static inline int before(unsigned long seq1, unsigned long seq2)
+{
+ return (long)(seq1-seq2) < 0;
+}
+
+static inline int after(unsigned long seq1, unsigned long seq2)
+{
+ return (long)(seq1-seq2) > 0;
+}
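+/*
+ * For example, after(1, 0xffffff00) evaluates (long)(1 - 0xffffff00) = 257,
+ * which is positive, so sequence 1 is treated as newer than 0xffffff00 even
+ * though the 32 bit sequence space has wrapped (this assumes the usual
+ * 32 bit unsigned long of the platforms these sources target).
+ */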
+
+
+/* is s2<=s1<=s3 ? */
+static inline int between(unsigned long seq1, unsigned long seq2, unsigned long seq3)
+{
+ return (after(seq1+1, seq2) && before(seq1, seq3+1));
+}
+
+
+/*
+ * List all states of a TCP socket that can be viewed as a "connected"
+ * state. This now includes TCP_SYN_RECV, although I am not yet fully
+ * convinced that this is the solution for the 'getpeername(2)'
+ * problem. Thanks to Stephen A. Wood <saw@cebaf.gov> -FvK
+ */
+static inline const int
+tcp_connected(const int state)
+{
+ return(state == TCP_ESTABLISHED || state == TCP_CLOSE_WAIT ||
+ state == TCP_FIN_WAIT1 || state == TCP_FIN_WAIT2 ||
+ state == TCP_SYN_RECV);
+}
+
+
+extern struct proto tcp_prot;
+
+
+extern void tcp_err(int err, unsigned char *header, unsigned long daddr,
+ unsigned long saddr, struct inet_protocol *protocol);
+extern void tcp_shutdown (struct sock *sk, int how);
+extern int tcp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, unsigned long daddr,
+ unsigned short len, unsigned long saddr, int redo,
+ struct inet_protocol *protocol);
+
+extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+extern int tcp_select_window(struct sock *sk);
+extern void tcp_send_check(struct tcphdr *th, unsigned long saddr,
+ unsigned long daddr, int len, struct sock *sk);
+extern void tcp_send_probe0(struct sock *sk);
+extern void tcp_enqueue_partial(struct sk_buff *, struct sock *);
+extern struct sk_buff * tcp_dequeue_partial(struct sock *);
+
+
+#endif /* _TCP_H */
diff --git a/net/inet/timer.c b/net/inet/timer.c
new file mode 100644
index 000000000..5ea2f3bbc
--- /dev/null
+++ b/net/inet/timer.c
@@ -0,0 +1,262 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * TIMER - implementation of software timers for IP.
+ *
+ * Version: @(#)timer.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de>
+ * Florian La Roche, <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : To avoid destroying a wait queue as we use it
+ * we defer destruction until the destroy timer goes
+ * off.
+ * Alan Cox : Destroy socket doesn't write a status value to the
+ * socket buffer _AFTER_ freeing it! Also sock ensures
+ * the socket will get removed BEFORE this is called
+ * otherwise if the timer TIME_DESTROY occurs inside
+ * of inet_bh() with this socket being handled it goes
+ * BOOM! Have to stop timer going off if net_bh is
+ * active or the destroy causes crashes.
+ * Alan Cox : Cleaned up unused code.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <asm/system.h>
+#include <linux/interrupt.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "arp.h"
+
+void delete_timer (struct sock *t)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ cli();
+
+ t->timeout = 0;
+ del_timer (&t->timer);
+
+ restore_flags (flags);
+}
+
+void reset_timer (struct sock *t, int timeout, unsigned long len)
+{
+ delete_timer (t);
+ t->timeout = timeout;
+#if 1
+ /* FIXME: ??? */
+ if ((int) len < 0) /* prevent close to infinite timers. THEY _DO_ */
+ len = 3; /* happen (negative values ?) - don't ask me why ! -FB */
+#endif
+ t->timer.expires = len;
+ add_timer (&t->timer);
+}
+
+
+/*
+ * Now we will only be called whenever we need to do
+ * something, but we must be sure to process all of the
+ * sockets that need it.
+ */
+
+void net_timer (unsigned long data)
+{
+ struct sock *sk = (struct sock*)data;
+ int why = sk->timeout;
+
+ /*
+ * only process if socket is not in use
+ */
+
+ cli();
+ if (sk->inuse || in_bh)
+ {
+ sk->timer.expires = 10;
+ add_timer(&sk->timer);
+ sti();
+ return;
+ }
+
+ sk->inuse = 1;
+ sti();
+
+ /* Always see if we need to send an ack. */
+
+ if (sk->ack_backlog && !sk->zapped)
+ {
+ sk->prot->read_wakeup (sk);
+ if (! sk->dead)
+ sk->data_ready(sk,0);
+ }
+
+ /* Now we need to figure out why the socket was on the timer. */
+
+ switch (why)
+ {
+ case TIME_DONE:
+ if (! sk->dead || sk->state != TCP_CLOSE)
+ {
+ printk ("non dead socket in time_done\n");
+ release_sock (sk);
+ break;
+ }
+ destroy_sock (sk);
+ break;
+
+ case TIME_DESTROY:
+ /*
+ * We've waited for a while for all the memory associated with
+ * the socket to be freed.
+ */
+ if(sk->wmem_alloc!=0 || sk->rmem_alloc!=0)
+ {
+ sk->wmem_alloc++; /* So it DOESN'T go away */
+ destroy_sock (sk);
+ sk->wmem_alloc--; /* Might now have hit 0 - fall through and do it again if so */
+ sk->inuse = 0; /* This will be ok, the destroy won't totally work */
+ }
+ if(sk->wmem_alloc==0 && sk->rmem_alloc==0)
+ destroy_sock(sk); /* Socket gone, DON'T update sk->inuse! */
+ break;
+ case TIME_CLOSE:
+ /* We've waited long enough, close the socket. */
+ sk->state = TCP_CLOSE;
+ delete_timer (sk);
+ /* Kill the ARP entry in case the hardware has changed. */
+ arp_destroy (sk->daddr, 0);
+ if (!sk->dead)
+ sk->state_change(sk);
+ sk->shutdown = SHUTDOWN_MASK;
+ reset_timer (sk, TIME_DESTROY, TCP_DONE_TIME);
+ release_sock (sk);
+ break;
+ case TIME_PROBE0:
+ tcp_send_probe0(sk);
+ release_sock (sk);
+ break;
+ case TIME_WRITE: /* try to retransmit. */
+ /* It could be we got here because we needed to send an ack.
+ * So we need to check for that.
+ */
+ {
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ skb = sk->send_head;
+ if (!skb)
+ {
+ restore_flags(flags);
+ }
+ else
+ {
+ if (jiffies < skb->when + sk->rto)
+ {
+ reset_timer (sk, TIME_WRITE, skb->when + sk->rto - jiffies);
+ restore_flags(flags);
+ release_sock (sk);
+ break;
+ }
+ restore_flags(flags);
+ /* printk("timer: seq %d retrans %d out %d cong %d\n", sk->send_head->h.seq,
+ sk->retransmits, sk->packets_out, sk->cong_window); */
+ sk->prot->retransmit (sk, 0);
+ if ((sk->state == TCP_ESTABLISHED && sk->retransmits && !(sk->retransmits & 7))
+ || (sk->state != TCP_ESTABLISHED && sk->retransmits > TCP_RETR1))
+ {
+ arp_destroy (sk->daddr, 0);
+ ip_route_check (sk->daddr);
+ }
+ if (sk->state != TCP_ESTABLISHED && sk->retransmits > TCP_RETR2)
+ {
+ sk->err = ETIMEDOUT;
+ if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2 || sk->state == TCP_CLOSING)
+ {
+ sk->state = TCP_TIME_WAIT;
+ reset_timer (sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
+ }
+ else
+ {
+ sk->prot->close (sk, 1);
+ break;
+ }
+ }
+ }
+ release_sock (sk);
+ break;
+ }
+ case TIME_KEEPOPEN:
+ /*
+ * this reset_timer() call is a hack, this is not
+ * how KEEPOPEN is supposed to work.
+ */
+ reset_timer (sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
+
+ /* Send something to keep the connection open. */
+ if (sk->prot->write_wakeup)
+ sk->prot->write_wakeup (sk);
+ sk->retransmits++;
+ if (sk->shutdown == SHUTDOWN_MASK)
+ {
+ sk->prot->close (sk, 1);
+ sk->state = TCP_CLOSE;
+ }
+ if ((sk->state == TCP_ESTABLISHED && sk->retransmits && !(sk->retransmits & 7))
+ || (sk->state != TCP_ESTABLISHED && sk->retransmits > TCP_RETR1))
+ {
+ arp_destroy (sk->daddr, 0);
+ ip_route_check (sk->daddr);
+ release_sock (sk);
+ break;
+ }
+ if (sk->state != TCP_ESTABLISHED && sk->retransmits > TCP_RETR2)
+ {
+ arp_destroy (sk->daddr, 0);
+ sk->err = ETIMEDOUT;
+ if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2)
+ {
+ sk->state = TCP_TIME_WAIT;
+ if (!sk->dead)
+ sk->state_change(sk);
+ release_sock (sk);
+ }
+ else
+ {
+ sk->prot->close (sk, 1);
+ }
+ break;
+ }
+ release_sock (sk);
+ break;
+ default:
+ printk ("net_timer: timer expired - reason unknown\n");
+ release_sock (sk);
+ break;
+ }
+}
+
diff --git a/net/inet/udp.c b/net/inet/udp.c
new file mode 100644
index 000000000..6e739e703
--- /dev/null
+++ b/net/inet/udp.c
@@ -0,0 +1,672 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * The User Datagram Protocol (UDP).
+ *
+ * Version: @(#)udp.c 1.0.13 06/02/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : verify_area() calls
+ * Alan Cox : stopped close while in use off icmp
+ * messages. Not a fix but a botch that
+ * for udp at least is 'valid'.
+ * Alan Cox : Fixed icmp handling properly
+ * Alan Cox : Correct error for oversized datagrams
+ * Alan Cox : Tidied select() semantics.
+ * Alan Cox : udp_err() fixed properly, also now
+ * select and read wake correctly on errors
+ * Alan Cox : udp_send verify_area moved to avoid mem leak
+ * Alan Cox : UDP can count its memory
+ * Alan Cox : send to an unknown connection causes
+ * an ECONNREFUSED off the icmp, but
+ * does NOT close.
+ * Alan Cox : Switched to new sk_buff handlers. No more backlog!
+ * Alan Cox : Using generic datagram code. Even smaller and the PEEK
+ * bug no longer crashes it.
+ * Fred Van Kempen : Net2e support for sk->broadcast.
+ * Alan Cox : Uses skb_free_datagram
+ * Alan Cox : Added get/set sockopt support.
+ * Alan Cox : Broadcasting without option set returns EACCES.
+ * Alan Cox : No wakeup calls. Instead we now use the callbacks.
+ * Alan Cox : Use ip_tos and ip_ttl
+ * Alan Cox : SNMP Mibs
+ * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
+ * Matt Dillon : UDP length checks.
+ * Alan Cox : Smarter af_inet used properly.
+ * Alan Cox : Use new kernel side addressing.
+ * Alan Cox : Incorrect return on truncated datagram receive.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fcntl.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/termios.h>
+#include <linux/mm.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include "snmp.h"
+#include "ip.h"
+#include "protocol.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+#include "sock.h"
+#include "udp.h"
+#include "icmp.h"
+
+/*
+ * SNMP MIB for the UDP layer
+ */
+
+struct udp_mib udp_statistics;
+
+
+
+
+#define min(a,b) ((a)<(b)?(a):(b))
+
+
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition. If err < 0 then the socket should
+ * be closed and the error returned to the user. If err > 0
+ * it's just the icmp type << 8 | icmp code.
+ * Header points to the ip header of the error packet. We move
+ * on past this. Then (as it used to claim before adjustment)
+ * header points to the first 8 bytes of the udp header. We need
+ * to find the appropriate port.
+ */
+
+void udp_err(int err, unsigned char *header, unsigned long daddr,
+ unsigned long saddr, struct inet_protocol *protocol)
+{
+ struct udphdr *th;
+ struct sock *sk;
+ struct iphdr *ip=(struct iphdr *)header;
+
+ header += 4*ip->ihl;
+
+ /*
+ * Find the 8 bytes of post IP header ICMP included for us
+ */
+
+ th = (struct udphdr *)header;
+
+ sk = get_sock(&udp_prot, th->source, daddr, th->dest, saddr);
+
+ if (sk == NULL)
+ return; /* No socket for error */
+
+	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
+ { /* Slow down! */
+ if (sk->cong_window > 1)
+ sk->cong_window = sk->cong_window/2;
+ return;
+ }
+
+ /*
+ * Various people wanted BSD UDP semantics. Well they've come
+ * back out because they slow down response to stuff like dead
+ * or unreachable name servers and they screw term users something
+ * chronic. Oh and it violates RFC1122. So basically fix your
+ * client code people.
+ */
+
+#ifdef CONFIG_I_AM_A_BROKEN_BSD_WEENIE
+ /*
+ * It's only fatal if we have connected to them. I'm not happy
+ * with this code. Some BSD comparisons need doing.
+ */
+
+ if (icmp_err_convert[err & 0xff].fatal && sk->state == TCP_ESTABLISHED)
+ {
+ sk->err = icmp_err_convert[err & 0xff].errno;
+ sk->error_report(sk);
+ }
+#else
+ if (icmp_err_convert[err & 0xff].fatal)
+ {
+ sk->err = icmp_err_convert[err & 0xff].errno;
+ sk->error_report(sk);
+ }
+#endif
+}
+
+
+static unsigned short udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr)
+{
+ unsigned long sum;
+
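+	/*
+	 * The assembler below first folds in the pseudo-header (source and
+	 * destination address, protocol number and UDP length), then adds
+	 * the datagram 32 bits at a time and finally folds the running sum
+	 * down to 16 bits.
+	 */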
+ __asm__( "\t addl %%ecx,%%ebx\n"
+ "\t adcl %%edx,%%ebx\n"
+ "\t adcl $0, %%ebx\n"
+ : "=b"(sum)
+ : "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_UDP*256)
+ : "cx","bx","dx" );
+
+ if (len > 3)
+ {
+ __asm__("\tclc\n"
+ "1:\n"
+ "\t lodsl\n"
+ "\t adcl %%eax, %%ebx\n"
+ "\t loop 1b\n"
+ "\t adcl $0, %%ebx\n"
+ : "=b"(sum) , "=S"(uh)
+ : "0"(sum), "c"(len/4) ,"1"(uh)
+ : "ax", "cx", "bx", "si" );
+ }
+
+ /*
+ * Convert from 32 bits to 16 bits.
+ */
+
+ __asm__("\t movl %%ebx, %%ecx\n"
+ "\t shrl $16,%%ecx\n"
+ "\t addw %%cx, %%bx\n"
+ "\t adcw $0, %%bx\n"
+ : "=b"(sum)
+ : "0"(sum)
+ : "bx", "cx");
+
+ /*
+ * Check for an extra word.
+ */
+
+ if ((len & 2) != 0)
+ {
+ __asm__("\t lodsw\n"
+ "\t addw %%ax,%%bx\n"
+ "\t adcw $0, %%bx\n"
+ : "=b"(sum), "=S"(uh)
+ : "0"(sum) ,"1"(uh)
+ : "si", "ax", "bx");
+ }
+
+ /*
+ * Now check for the extra byte.
+ */
+
+ if ((len & 1) != 0)
+ {
+ __asm__("\t lodsb\n"
+ "\t movb $0,%%ah\n"
+ "\t addw %%ax,%%bx\n"
+ "\t adcw $0, %%bx\n"
+ : "=b"(sum)
+ : "0"(sum) ,"S"(uh)
+ : "si", "ax", "bx");
+ }
+
+ /*
+ * We only want the bottom 16 bits, but we never cleared the top 16.
+ */
+
+ return((~sum) & 0xffff);
+}
+
+/*
+ * Generate UDP checksums. These may be disabled, e.g. for fast NFS over
+ * ethernet. We default them to enabled; if you turn them off you either
+ * know what you are doing or get burned...
+ */
+
+static void udp_send_check(struct udphdr *uh, unsigned long saddr,
+ unsigned long daddr, int len, struct sock *sk)
+{
+ uh->check = 0;
+ if (sk && sk->no_check)
+ return;
+ uh->check = udp_check(uh, len, saddr, daddr);
+
+ /*
+ * FFFF and 0 are the same, pick the right one as 0 in the
+ * actual field means no checksum.
+ */
+
+ if (uh->check == 0)
+ uh->check = 0xffff;
+}
+
+
+static int udp_send(struct sock *sk, struct sockaddr_in *sin,
+ unsigned char *from, int len, int rt)
+{
+ struct sk_buff *skb;
+ struct device *dev;
+ struct udphdr *uh;
+ unsigned char *buff;
+ unsigned long saddr;
+ int size, tmp;
+
+ /*
+ * Allocate an sk_buff copy of the packet.
+ */
+
+ size = sk->prot->max_header + len;
+ skb = sock_alloc_send_skb(sk, size, 0, &tmp);
+
+
+ if (skb == NULL)
+ return tmp;
+
+ skb->sk = NULL; /* to avoid changing sk->saddr */
+ skb->free = 1;
+ skb->localroute = sk->localroute|(rt&MSG_DONTROUTE);
+
+ /*
+ * Now build the IP and MAC header.
+ */
+
+ buff = skb->data;
+ saddr = sk->saddr;
+ dev = NULL;
+ tmp = sk->prot->build_header(skb, saddr, sin->sin_addr.s_addr,
+ &dev, IPPROTO_UDP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
+ skb->sk=sk; /* So memory is freed correctly */
+
+ /*
+ * Unable to put a header on the packet.
+ */
+
+ if (tmp < 0 )
+ {
+ sk->prot->wfree(sk, skb->mem_addr, skb->mem_len);
+ return(tmp);
+ }
+
+ buff += tmp;
+ saddr = skb->saddr; /*dev->pa_addr;*/
+ skb->len = tmp + sizeof(struct udphdr) + len; /* len + UDP + IP + MAC */
+ skb->dev = dev;
+
+ /*
+ * Fill in the UDP header.
+ */
+
+ uh = (struct udphdr *) buff;
+ uh->len = htons(len + sizeof(struct udphdr));
+ uh->source = sk->dummy_th.source;
+ uh->dest = sin->sin_port;
+ buff = (unsigned char *) (uh + 1);
+
+ /*
+ * Copy the user data.
+ */
+
+ memcpy_fromfs(buff, from, len);
+
+ /*
+ * Set up the UDP checksum.
+ */
+
+ udp_send_check(uh, saddr, sin->sin_addr.s_addr, skb->len - tmp, sk);
+
+ /*
+ * Send the datagram to the interface.
+ */
+
+ udp_statistics.UdpOutDatagrams++;
+
+ sk->prot->queue_xmit(sk, dev, skb, 1);
+ return(len);
+}
+
+
+static int udp_sendto(struct sock *sk, unsigned char *from, int len, int noblock,
+ unsigned flags, struct sockaddr_in *usin, int addr_len)
+{
+ struct sockaddr_in sin;
+ int tmp;
+
+ /*
+ * Check the flags. We support no flags for UDP sending
+ */
+ if (flags&~MSG_DONTROUTE)
+ return(-EINVAL);
+ /*
+ * Get and verify the address.
+ */
+
+ if (usin)
+ {
+ if (addr_len < sizeof(sin))
+ return(-EINVAL);
+ memcpy(&sin,usin,sizeof(sin));
+ if (sin.sin_family && sin.sin_family != AF_INET)
+ return(-EINVAL);
+ if (sin.sin_port == 0)
+ return(-EINVAL);
+ }
+ else
+ {
+ if (sk->state != TCP_ESTABLISHED)
+ return(-EINVAL);
+ sin.sin_family = AF_INET;
+ sin.sin_port = sk->dummy_th.dest;
+ sin.sin_addr.s_addr = sk->daddr;
+ }
+
+ /*
+ * BSD socket semantics. You must set SO_BROADCAST to permit
+ * broadcasting of data.
+ */
+
+ if(sin.sin_addr.s_addr==INADDR_ANY)
+ sin.sin_addr.s_addr=ip_my_addr();
+
+ if(!sk->broadcast && ip_chk_addr(sin.sin_addr.s_addr)==IS_BROADCAST)
+ return -EACCES; /* Must turn broadcast on first */
+
+ sk->inuse = 1;
+
+ /* Send the packet. */
+ tmp = udp_send(sk, &sin, from, len, flags);
+
+ /* The datagram has been sent off. Release the socket. */
+ release_sock(sk);
+ return(tmp);
+}
+
+/*
+ * In BSD SOCK_DGRAM a write is just like a send.
+ */
+
+static int udp_write(struct sock *sk, unsigned char *buff, int len, int noblock,
+ unsigned flags)
+{
+ return(udp_sendto(sk, buff, len, noblock, flags, NULL, 0));
+}
+
+
+/*
+ * IOCTL requests applicable to the UDP protocol
+ */
+
+int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ int err;
+ switch(cmd)
+ {
+ case TIOCOUTQ:
+ {
+ unsigned long amount;
+
+ if (sk->state == TCP_LISTEN) return(-EINVAL);
+ amount = sk->prot->wspace(sk)/*/2*/;
+ err=verify_area(VERIFY_WRITE,(void *)arg,
+ sizeof(unsigned long));
+ if(err)
+ return(err);
+ put_fs_long(amount,(unsigned long *)arg);
+ return(0);
+ }
+
+ case TIOCINQ:
+ {
+ struct sk_buff *skb;
+ unsigned long amount;
+
+ if (sk->state == TCP_LISTEN) return(-EINVAL);
+ amount = 0;
+ skb = skb_peek(&sk->receive_queue);
+ if (skb != NULL) {
+ /*
+ * We will only return the amount
+ * of this packet since that is all
+ * that will be read.
+ */
+ amount = skb->len;
+ }
+ err=verify_area(VERIFY_WRITE,(void *)arg,
+ sizeof(unsigned long));
+ if(err)
+ return(err);
+ put_fs_long(amount,(unsigned long *)arg);
+ return(0);
+ }
+
+ default:
+ return(-EINVAL);
+ }
+ return(0);
+}
+
+
+/*
+ *	This should be easy: if there is something there we
+ *	return it, otherwise we block.
+ */
+
+int udp_recvfrom(struct sock *sk, unsigned char *to, int len,
+ int noblock, unsigned flags, struct sockaddr_in *sin,
+ int *addr_len)
+{
+ int copied = 0;
+ int truesize;
+ struct sk_buff *skb;
+ int er;
+
+ /*
+ * Check any passed addresses
+ */
+
+ if (addr_len)
+ *addr_len=sizeof(*sin);
+
+ /*
+ * From here the generic datagram does a lot of the work. Come
+ * the finished NET3, it will do _ALL_ the work!
+ */
+
+ skb=skb_recv_datagram(sk,flags,noblock,&er);
+ if(skb==NULL)
+ return er;
+
+ truesize = skb->len;
+ copied = min(len, truesize);
+
+ /*
+ * FIXME : should use udp header size info value
+ */
+
+ skb_copy_datagram(skb,sizeof(struct udphdr),to,copied);
+ sk->stamp=skb->stamp;
+
+ /* Copy the address. */
+ if (sin)
+ {
+ sin->sin_family = AF_INET;
+ sin->sin_port = skb->h.uh->source;
+ sin->sin_addr.s_addr = skb->daddr;
+ }
+
+ skb_free_datagram(skb);
+ release_sock(sk);
+ return(truesize);
+}
+
+/*
+ * Read has the same semantics as recv in SOCK_DGRAM
+ */
+
+int udp_read(struct sock *sk, unsigned char *buff, int len, int noblock,
+ unsigned flags)
+{
+ return(udp_recvfrom(sk, buff, len, noblock, flags, NULL, NULL));
+}
+
+
+int udp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
+{
+ if (addr_len < sizeof(*usin))
+ return(-EINVAL);
+
+ if (usin->sin_family && usin->sin_family != AF_INET)
+ return(-EAFNOSUPPORT);
+ if (usin->sin_addr.s_addr==INADDR_ANY)
+ usin->sin_addr.s_addr=ip_my_addr();
+
+ if(!sk->broadcast && ip_chk_addr(usin->sin_addr.s_addr)==IS_BROADCAST)
+ return -EACCES; /* Must turn broadcast on first */
+
+ sk->daddr = usin->sin_addr.s_addr;
+ sk->dummy_th.dest = usin->sin_port;
+ sk->state = TCP_ESTABLISHED;
+ return(0);
+}
+
+
+static void udp_close(struct sock *sk, int timeout)
+{
+ sk->inuse = 1;
+ sk->state = TCP_CLOSE;
+ if (sk->dead)
+ destroy_sock(sk);
+ else
+ release_sock(sk);
+}
+
+
+/*
+ * All we need to do is get the socket, and then do a checksum.
+ */
+
+int udp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
+ unsigned long daddr, unsigned short len,
+ unsigned long saddr, int redo, struct inet_protocol *protocol)
+{
+ struct sock *sk;
+ struct udphdr *uh;
+ unsigned short ulen;
+
+ /*
+ * Get the header.
+ */
+ uh = (struct udphdr *) skb->h.uh;
+
+ ip_statistics.IpInDelivers++;
+
+ /*
+ * Validate the packet and the UDP length.
+ */
+
+ ulen = ntohs(uh->len);
+
+ if (ulen > len || len < sizeof(*uh) || ulen < sizeof(*uh))
+ {
+ printk("UDP: short packet: %d/%d\n", ulen, len);
+ udp_statistics.UdpInErrors++;
+ kfree_skb(skb, FREE_WRITE);
+ return(0);
+ }
+ len=ulen;
+
+ sk = get_sock(&udp_prot, uh->dest, saddr, uh->source, daddr);
+ if (sk == NULL)
+ {
+ udp_statistics.UdpNoPorts++;
+ if (ip_chk_addr(daddr) == IS_MYADDR)
+ {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, dev);
+ }
+ /*
+		 * Hmm. We got a UDP broadcast to a port to which we
+ * don't wanna listen. Ignore it.
+ */
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_WRITE);
+ return(0);
+ }
+
+ if (uh->check && udp_check(uh, len, saddr, daddr))
+ {
+ printk("UDP: bad checksum.\n");
+ udp_statistics.UdpInErrors++;
+ kfree_skb(skb, FREE_WRITE);
+ return(0);
+ }
+
+ skb->sk = sk;
+ skb->dev = dev;
+ skb->len = len;
+
+ /*
+ * These are supposed to be switched.
+ */
+
+ skb->daddr = saddr;
+ skb->saddr = daddr;
+
+
+ /*
+ * Charge it to the socket, dropping if the queue is full.
+ */
+
+ skb->len = len - sizeof(*uh);
+
+ if (sock_queue_rcv_skb(sk,skb)<0)
+ {
+ udp_statistics.UdpInErrors++;
+ ip_statistics.IpInDiscards++;
+ ip_statistics.IpInDelivers--;
+ skb->sk = NULL;
+ kfree_skb(skb, FREE_WRITE);
+ release_sock(sk);
+ return(0);
+ }
+ udp_statistics.UdpInDatagrams++;
+ release_sock(sk);
+ return(0);
+}
+
+
+struct proto udp_prot = {
+ sock_wmalloc,
+ sock_rmalloc,
+ sock_wfree,
+ sock_rfree,
+ sock_rspace,
+ sock_wspace,
+ udp_close,
+ udp_read,
+ udp_write,
+ udp_sendto,
+ udp_recvfrom,
+ ip_build_header,
+ udp_connect,
+ NULL,
+ ip_queue_xmit,
+ ip_retransmit,
+ NULL,
+ NULL,
+ udp_rcv,
+ datagram_select,
+ udp_ioctl,
+ NULL,
+ NULL,
+ ip_setsockopt,
+ ip_getsockopt,
+ 128,
+ 0,
+ {NULL,},
+ "UDP"
+};
+
diff --git a/net/inet/udp.h b/net/inet/udp.h
new file mode 100644
index 000000000..6bfbb3cb7
--- /dev/null
+++ b/net/inet/udp.h
@@ -0,0 +1,50 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP module.
+ *
+ * Version: @(#)udp.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : Turned on udp checksums. I don't want to
+ * chase 'memory corruption' bugs that aren't!
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UDP_H
+#define _UDP_H
+
+#include <linux/udp.h>
+
+
+#define UDP_NO_CHECK 0
+
+
+extern struct proto udp_prot;
+
+
+extern void udp_err(int err, unsigned char *header, unsigned long daddr,
+ unsigned long saddr, struct inet_protocol *protocol);
+extern int udp_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int udp_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+extern int udp_connect(struct sock *sk,
+ struct sockaddr_in *usin, int addr_len);
+extern int udp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, unsigned long daddr,
+ unsigned short len, unsigned long saddr, int redo,
+ struct inet_protocol *protocol);
+extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+
+#endif /* _UDP_H */
diff --git a/net/inet/utils.c b/net/inet/utils.c
new file mode 100644
index 000000000..60bbb9f80
--- /dev/null
+++ b/net/inet/utils.c
@@ -0,0 +1,91 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Various kernel-resident INET utility functions; mainly
+ * for format conversion and debugging output.
+ *
+ * Version: @(#)utils.c 1.0.7 05/18/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : verify_area check.
+ * Alan Cox : removed old debugging.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <stdarg.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "ip.h"
+#include "protocol.h"
+#include "tcp.h"
+#include <linux/skbuff.h>
+
+
+/*
+ * Display an IP address in readable format.
+ */
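+/*
+ * Note that the result lives in a static buffer, so it must be used or
+ * copied before the next call.
+ */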
+
+char *in_ntoa(unsigned long in)
+{
+ static char buff[18];
+ char *p;
+
+ p = (char *) &in;
+ sprintf(buff, "%d.%d.%d.%d",
+ (p[0] & 255), (p[1] & 255), (p[2] & 255), (p[3] & 255));
+ return(buff);
+}
+
+
+/*
+ * Convert an ASCII string to binary IP.
+ */
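+/*
+ * Each octet is accumulated until a '.' or the end of the string, so for
+ * example in_aton("192.168.1.1") returns htonl(0xC0A80101).
+ */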
+
+unsigned long in_aton(char *str)
+{
+ unsigned long l;
+ unsigned int val;
+ int i;
+
+ l = 0;
+ for (i = 0; i < 4; i++)
+ {
+ l <<= 8;
+ if (*str != '\0')
+ {
+ val = 0;
+ while (*str != '\0' && *str != '.')
+ {
+ val *= 10;
+ val += *str - '0';
+ str++;
+ }
+ l |= val;
+ if (*str != '\0')
+ str++;
+ }
+ }
+ return(htonl(l));
+}
+