author     Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
commit     529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree       78f1c0b805f5656aa7b0417a043c5346f700a2cf /include/net
parent     0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I did ignore all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'include/net')
-rw-r--r--   include/net/atmclip.h          6
-rw-r--r--   include/net/dsfield.h          4
-rw-r--r--   include/net/irda/nsc-ircc.h    3
-rw-r--r--   include/net/irda/smc-ircc.h    3
-rw-r--r--   include/net/neighbour.h        1
-rw-r--r--   include/net/pkt_sched.h       66
-rw-r--r--   include/net/snmp.h             2
-rw-r--r--   include/net/sock.h            12
-rw-r--r--   include/net/tcp.h             34
9 files changed, 42 insertions, 89 deletions
diff --git a/include/net/atmclip.h b/include/net/atmclip.h
index f350c3103..edcae7c37 100644
--- a/include/net/atmclip.h
+++ b/include/net/atmclip.h
@@ -1,6 +1,6 @@
 /* net/atm/atmarp.h - RFC1577 ATM ARP */
 
-/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
+/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
 
 
 #ifndef _ATMCLIP_H
@@ -26,7 +26,9 @@ struct clip_vcc {
         unsigned long   last_use;       /* last send or receive operation */
         unsigned long   idle_timeout;   /* keep open idle for so many jiffies*/
         void (*old_push)(struct atm_vcc *vcc,struct sk_buff *skb);
-                                        /* keep old push fn for detaching */
+                                        /* keep old push fn for chaining */
+        void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb);
+                                        /* keep old pop fn for chaining */
         struct clip_vcc *next;          /* next VCC */
 };
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
index 2201a88d9..778b6baea 100644
--- a/include/net/dsfield.h
+++ b/include/net/dsfield.h
@@ -1,6 +1,6 @@
 /* include/net/dsfield.h - Manipulation of the Differentiated Services field */
 
-/* Written 1998 by Werner Almesberger, EPFL ICA */
+/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
 
 
 #ifndef __NET_DSFIELD_H
@@ -46,7 +46,7 @@ extern __inline__ void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
         __u16 tmp;
 
         tmp = ntohs(*(__u16 *) ipv6h);
-        tmp = (tmp & (mask << 4)) | (value << 4);
+        tmp = (tmp & ((mask << 4) | 0xf00f)) | (value << 4);
         *(__u16 *) ipv6h = htons(tmp);
 }
diff --git a/include/net/irda/nsc-ircc.h b/include/net/irda/nsc-ircc.h
index 75e5c2bc7..637458dd1 100644
--- a/include/net/irda/nsc-ircc.h
+++ b/include/net/irda/nsc-ircc.h
@@ -31,6 +31,7 @@
 #include <linux/time.h>
 #include <linux/spinlock.h>
+#include <linux/pm.h>
 #include <asm/io.h>
 
 /* DMA modes needed */
@@ -253,6 +254,8 @@ struct nsc_ircc_cb {
         __u32 flags;          /* Interface flags */
         __u32 new_speed;
         int index;            /* Instance index */
+
+        struct pm_dev *dev;
 };
 
 static inline void switch_bank(int iobase, int bank)
diff --git a/include/net/irda/smc-ircc.h b/include/net/irda/smc-ircc.h
index cac7644ca..0bd5e38b4 100644
--- a/include/net/irda/smc-ircc.h
+++ b/include/net/irda/smc-ircc.h
@@ -31,6 +31,7 @@
 #define SMC_IRCC_H
 
 #include <linux/spinlock.h>
+#include <linux/pm.h>
 
 #include <net/irda/irport.h>
 
@@ -181,6 +182,8 @@ struct ircc_cb {
         int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
         int tx_len;              /* Number of frames in tx_buff */
+
+        struct pm_dev *pmdev;
 };
 
 #endif /* SMC_IRCC_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 1a6f1dad0..b63398881 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -163,6 +163,7 @@ struct neigh_table
         unsigned long           last_rand;
         struct neigh_parms      *parms_list;
         kmem_cache_t            *kmem_cachep;
+        struct tasklet_struct   gc_task;
         struct neigh_statistics stats;
         struct neighbour        *hash_buckets[NEIGH_HASHMASK+1];
         struct pneigh_entry     *phash_buckets[PNEIGH_HASHMASK+1];
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b866777a6..2c4b4cff9 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -63,19 +63,10 @@ struct Qdisc_ops
         int                     (*dump)(struct Qdisc *, struct sk_buff *);
 };
 
-struct Qdisc_head
-{
-        struct Qdisc_head *forw;
-        struct Qdisc_head *back;
-};
-
-extern struct Qdisc_head qdisc_head;
-extern spinlock_t qdisc_runqueue_lock;
 extern rwlock_t qdisc_tree_lock;
 
 struct Qdisc
 {
-        struct Qdisc_head       h;
         int                     (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
         struct sk_buff *        (*dequeue)(struct Qdisc *dev);
         unsigned                flags;
@@ -87,11 +78,9 @@ struct Qdisc
         u32                     handle;
         atomic_t                refcnt;
         struct sk_buff_head     q;
-        struct net_device       *dev;
+        struct net_device       *dev;
         struct tc_stats         stats;
-        unsigned long           tx_timeo;
-        unsigned long           tx_last;
         int                     (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q);
 
         /* This field is deprecated, but it is still used by CBQ
@@ -437,60 +426,13 @@ int teql_init(void);
 int tc_filter_init(void);
 int pktsched_init(void);
 
-extern void qdisc_run_queues(void);
 extern int qdisc_restart(struct net_device *dev);
 
-extern spinlock_t qdisc_runqueue_lock;
-
-/* Is it on run list? Reliable only under qdisc_runqueue_lock. */
-
-extern __inline__ int qdisc_on_runqueue(struct Qdisc *q)
-{
-        return q->h.forw != NULL;
-}
-
-/* Is run list not empty? Reliable only under qdisc_runqueue_lock. */
-
-extern __inline__ int qdisc_pending(void)
-{
-        return qdisc_head.forw != &qdisc_head;
-}
-
-/* Add qdisc to tail of run list. Called with BH, disabled on this CPU */
-
-extern __inline__ void qdisc_run(struct Qdisc *q)
-{
-        spin_lock(&qdisc_runqueue_lock);
-        if (!qdisc_on_runqueue(q) && q->dev) {
-                q->h.forw = &qdisc_head;
-                q->h.back = qdisc_head.back;
-                qdisc_head.back->forw = &q->h;
-                qdisc_head.back = &q->h;
-        }
-        spin_unlock(&qdisc_runqueue_lock);
-}
-
-extern __inline__ int __qdisc_wakeup(struct net_device *dev)
+extern __inline__ void qdisc_run(struct net_device *dev)
 {
-        int res;
-
-        while ((res = qdisc_restart(dev))<0 && !dev->tbusy)
+        while (!test_bit(LINK_STATE_XOFF, &dev->state) &&
+               qdisc_restart(dev)<0)
                 /* NOTHING */;
-
-        return res;
-}
-
-
-/* If the device is not throttled, restart it and add to run list.
- * BH must be disabled on this CPU. Usually, it is called by timers.
- */
-
-extern __inline__ void qdisc_wakeup(struct net_device *dev)
-{
-        spin_lock(&dev->queue_lock);
-        if (dev->tbusy || __qdisc_wakeup(dev))
-                qdisc_run(dev->qdisc);
-        spin_unlock(&dev->queue_lock);
 }
 
 /* Calculate maximal size of packet seen by hard_start_xmit
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 5105fd220..8bcb17085 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -202,7 +202,7 @@ struct linux_mib
         unsigned long   __pad[32-26];
 };
 
-#define SNMP_INC_STATS(mib, field)      ((mib)[2*smp_processor_id()+!in_interrupt()].field++)
+#define SNMP_INC_STATS(mib, field)      ((mib)[2*smp_processor_id()+!in_softirq()].field++)
 #define SNMP_INC_STATS_BH(mib, field)   ((mib)[2*smp_processor_id()].field++)
 #define SNMP_INC_STATS_USER(mib, field) ((mib)[2*smp_processor_id()+1].field++)
diff --git a/include/net/sock.h b/include/net/sock.h
index 5dc9f5be3..92519ee88 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -242,6 +242,7 @@ struct tcp_opt {
                 __u32   lrcvtime;       /* timestamp of last received data packet*/
                 __u16   last_seg_size;  /* Size of last incoming segment */
                 __u16   rcv_mss;        /* MSS used for delayed ACK decisions */
+                __u32   rcv_segs;       /* Number of received segments since last ack */
         } ack;
 
         /* Data for direct copy to user */
@@ -325,7 +326,6 @@ struct tcp_opt {
         __u32   rcv_tsecr;      /* Time stamp echo reply */
         __u32   ts_recent;      /* Time stamp to echo next */
         long    ts_recent_stamp;/* Time we stored ts_recent (for aging) */
-        __u32   last_ack_sent;  /* last ack we sent (RTTM/PAWS) */
 
         /* SACKs data */
         struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
@@ -934,20 +934,20 @@ extern __inline__ void sock_put(struct sock *sk)
  */
 extern __inline__ void sock_orphan(struct sock *sk)
 {
-        write_lock_irq(&sk->callback_lock);
+        write_lock_bh(&sk->callback_lock);
         sk->dead = 1;
         sk->socket = NULL;
         sk->sleep = NULL;
-        write_unlock_irq(&sk->callback_lock);
+        write_unlock_bh(&sk->callback_lock);
 }
 
 extern __inline__ void sock_graft(struct sock *sk, struct socket *parent)
 {
-        write_lock_irq(&sk->callback_lock);
+        write_lock_bh(&sk->callback_lock);
         sk->sleep = &parent->wait;
         parent->sk = sk;
         sk->socket = parent;
-        write_unlock_irq(&sk->callback_lock);
+        write_unlock_bh(&sk->callback_lock);
 }
 
 
@@ -1150,7 +1150,7 @@ extern __inline__ int sock_writeable(struct sock *sk)
 
 extern __inline__ int gfp_any(void)
 {
-        return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+        return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
 extern __inline__ long sock_rcvtimeo(struct sock *sk, int noblock)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index db16f7253..d2c937f96 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -20,6 +20,8 @@
 
 #define TCP_DEBUG 1
 #undef TCP_FORMAL_WINDOW
+#define TCP_MORE_COARSE_ACKS
+#undef TCP_LESS_COARSE_ACKS
 
 #include <linux/config.h>
 #include <linux/tcp.h>
@@ -287,10 +289,10 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
                                   * TIME-WAIT timer.
                                   */
 
-#define TCP_DELACK_MAX  (HZ/2)  /* maximal time to delay before sending an ACK */
+#define TCP_DELACK_MAX  (HZ/5)  /* maximal time to delay before sending an ACK */
 #define TCP_DELACK_MIN  (2)     /* minimal time to delay before sending an ACK,
-                                 * 2 scheduler ticks, not depending on HZ */
-#define TCP_ATO_MAX     ((TCP_DELACK_MAX*4)/5) /* ATO producing TCP_DELACK_MAX */
+                                 * 2 scheduler ticks, not depending on HZ. */
+#define TCP_ATO_MAX     (HZ/2)  /* Clamp ATO estimator at his value. */
#define TCP_ATO_MIN     2
 #define TCP_RTO_MAX     (120*HZ)
 #define TCP_RTO_MIN     (HZ/5)
@@ -335,12 +337,14 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
    so that we select tick to get range about 4 seconds.
  */
 
-#if HZ == 100 || HZ == 128
-#define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#if HZ == 20
+# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ == 100 || HZ == 128
+# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
 #elif HZ == 1024
-#define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
+# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
 #else
-#error HZ != 100 && HZ != 1024.
+# error HZ != 20 && HZ != 100 && HZ != 1024.
 #endif
 
 /*
@@ -594,11 +598,8 @@ extern int tcp_rcv_established(struct sock *sk,
 
 static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
 {
-        if (tp->ack.quick && --tp->ack.quick == 0 && !tp->ack.pingpong) {
-                /* Leaving quickack mode we deflate ATO to give peer
-                 * a time to adapt to new worse(!) RTO. It is not required
-                 * in pingpong mode, when ACKs were delayed in any case.
-                 */
+        if (tp->ack.quick && --tp->ack.quick == 0) {
+                /* Leaving quickack mode we deflate ATO. */
                 tp->ack.ato = TCP_ATO_MIN;
         }
 }
@@ -825,12 +826,13 @@ extern __inline__ u16 tcp_select_window(struct sock *sk)
                  * Don't update rcv_wup/rcv_wnd here or else
                  * we will not be able to advertise a zero
                  * window in time.  --DaveM
+                 *
+                 * Relax Will Robinson.
                  */
                 new_win = cur_win;
-        } else {
-                tp->rcv_wnd = new_win;
-                tp->rcv_wup = tp->rcv_nxt;
         }
+        tp->rcv_wnd = new_win;
+        tp->rcv_wup = tp->rcv_nxt;
 
         /* RFC1323 scaling applied */
         new_win >>= tp->rcv_wscale;
@@ -1186,7 +1188,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
                 /* fall through */
         default:
                 if (oldstate==TCP_ESTABLISHED)
-                        tcp_statistics[smp_processor_id()*2+!in_interrupt()].TcpCurrEstab--;
+                        tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
         }
 
         /* Change state AFTER socket is unhashed to avoid closed
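A note on the dsfield.h hunk above: the wider mask keeps the IPv6 version nibble and the top four flow-label bits intact while the traffic-class byte is rewritten, whereas the old expression zeroed whatever bits the caller's mask did not cover. Below is a minimal, stand-alone C sketch of that masking arithmetic only; it is not kernel code, and the helper name change_dsfield and the sample values are invented for illustration.

/*
 * Stand-alone sketch of the masking arithmetic from the dsfield.h hunk.
 * "first16" plays the role of the first 16 bits of an IPv6 header in host
 * byte order: version (4 bits), traffic class (8 bits), top 4 bits of the
 * flow label.  Helper name and sample values are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t change_dsfield(uint16_t first16, uint8_t mask, uint8_t value)
{
        /* 0xf00f preserves the version nibble and the flow-label nibble;
         * (mask << 4) selects which old traffic-class bits the caller keeps. */
        return (first16 & ((mask << 4) | 0xf00f)) | (value << 4);
}

int main(void)
{
        uint16_t first16 = 0x6ab5;  /* version 6, traffic class 0xab, flow-label nibble 5 */
        uint8_t mask = 0x00;        /* keep none of the old traffic class */
        uint8_t value = 0x2e;       /* new traffic class */

        /* Old expression: also clears the version and flow-label nibbles. */
        printf("old: 0x%04x\n", (unsigned)((first16 & (mask << 4)) | (value << 4)) & 0xffff);
        /* New expression: only the traffic-class byte is replaced. */
        printf("new: 0x%04x\n", (unsigned)change_dsfield(first16, mask, value));
        return 0;
}

Compiled as ordinary user-space C, this prints old: 0x02e0 and new: 0x62e5, i.e. under the new expression only the traffic-class byte changes.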