author		Ralf Baechle <ralf@linux-mips.org>	2000-02-18 22:06:10 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-02-18 22:06:10 +0000
commit		aba4e552a2f2c1492441acbccedd8e0a4c53f916 (patch)
tree		23921efb2b4af590160f034a89ff3da2ecca6e47 /net
parent		9e17e1aa1cf1cb497d2f67147a51831888affcf3 (diff)
Merge with Linux 2.3.43.
Diffstat (limited to 'net')
-rw-r--r--	net/ax25/af_ax25.c	2
-rw-r--r--	net/ipv4/ipconfig.c	2
-rw-r--r--	net/ipv4/tcp.c	8
-rw-r--r--	net/ipv4/tcp_input.c	60
-rw-r--r--	net/ipv4/tcp_ipv4.c	3
-rw-r--r--	net/ipv4/tcp_output.c	4
-rw-r--r--	net/ipv6/ipv6_sockglue.c	3
-rw-r--r--	net/ipv6/tcp_ipv6.c	3
-rw-r--r--	net/irda/irda_device.c	10
-rw-r--r--	net/irda/wrapper.c	30
-rw-r--r--	net/netrom/af_netrom.c	2
-rw-r--r--	net/rose/af_rose.c	2
-rw-r--r--	net/sched/sch_dsmark.c	4
-rw-r--r--	net/sched/sch_gred.c	94
14 files changed, 121 insertions, 106 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2941951df..f39e19d41 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1313,7 +1313,7 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, int len, struct
 	int lv;
 	int addr_len = msg->msg_namelen;
 
-	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR))
+	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR))
 		return -EINVAL;
 
 	if (sk->zapped)
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index d4d556cb5..82e443d4d 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1,5 +1,5 @@
 /*
- *  $Id: ipconfig.c,v 1.25 2000/01/09 02:19:31 davem Exp $
+ *  $Id: ipconfig.c,v 1.26 2000/01/29 07:42:08 davem Exp $
 *
 *  Automatic Configuration of IP -- use BOOTP or RARP or user-supplied
 *  information to configure own IP address and routes.
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e01892326..aa890aef3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5,7 +5,7 @@
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
- * Version:	$Id: tcp.c,v 1.160 2000/01/24 18:40:32 davem Exp $
+ * Version:	$Id: tcp.c,v 1.161 2000/01/31 01:21:16 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -1119,7 +1119,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
 	/* Delayed ACKs frequently hit locked sockets during bulk receive. */
 	time_to_ack = tp->ack.blocked && tp->ack.pending;
-#if 1/*def CONFIG_TCP_MORE_COARSE_ACKS*/
+#ifdef CONFIG_TCP_MORE_COARSE_ACKS
 	if (tp->ack.pending &&
 	    (tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss)
 		time_to_ack = 1;
@@ -1344,7 +1344,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
 		tp->ucopy.len = len;
 
-		BUG_TRAP(tp->copied_seq == tp->rcv_nxt);
+		BUG_TRAP(tp->copied_seq == tp->rcv_nxt || (flags&MSG_PEEK));
 
 		/* Ugly... If prequeue is not empty, we have to
 		 * process it before releasing socket, otherwise
@@ -1408,7 +1408,7 @@ do_prequeue:
 					copied += chunk;
 				}
 			}
-#if 1/*def CONFIG_TCP_MORE_COARSE_ACKS*/
+#ifdef CONFIG_TCP_MORE_COARSE_ACKS
 			if (tp->ack.pending &&
 			    (tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss)
 				tcp_send_ack(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d61a5df02..366dddc89 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,7 +5,7 @@
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
- * Version:	$Id: tcp_input.c,v 1.183 2000/01/24 18:40:33 davem Exp $
+ * Version:	$Id: tcp_input.c,v 1.186 2000/01/31 20:26:13 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -1346,7 +1346,8 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
 			goto kill_with_rst;
 
 		/* Dup ACK? */
-		if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt)) {
+		if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt) ||
+		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
 			tcp_tw_put(tw);
 			return TCP_TW_SUCCESS;
 		}
@@ -1912,6 +1913,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		    !tp->urg_data) {
 			int chunk = min(skb->len, tp->ucopy.len);
 
+			__set_current_state(TASK_RUNNING);
+
 			local_bh_enable();
 			if (memcpy_toiovec(tp->ucopy.iov, skb->data, chunk)) {
 				sk->err = EFAULT;
@@ -1948,10 +1951,9 @@ queue_and_out:
 		    !tp->urg_data)
 			tcp_fast_path_on(tp);
 
-		if (eaten)
+		if (eaten) {
 			kfree_skb(skb);
-
-		if (!sk->dead) {
+		} else if (!sk->dead) {
 			wake_up_interruptible(sk->sleep);
 			sock_wake_async(sk->socket,1, POLL_IN);
 		}
@@ -2296,29 +2298,7 @@ static int prune_queue(struct sock *sk)
 	 * fails? -ANK
 	 */
 
-	/* F.e. one possible tactics is: */
-	do {
-		u32 new_clamp = (tp->rcv_nxt-tp->copied_seq) + pruned;
-
-		/* This guy is not a good guy. I bet, he martirized cats,
-		 * when was child and grew up to finished sadist. Clamp him!
-		 */
-		if (new_clamp > 3*tp->ack.rcv_mss)
-			new_clamp -= tp->ack.rcv_mss;
-		else
-			new_clamp = 2*tp->ack.rcv_mss;
-		tp->window_clamp = min(tp->window_clamp, new_clamp);
-	} while (0);
-	/* Though it should be made earlier, when we are still not
-	 * congested. This header prediction logic sucks
-	 * without true implementation of VJ algorithm.
-	 * I am really anxious. How was it possible to combine
-	 * header prediction and sending ACKs outside of recvmsg() context?
-	 * They _are_ incompatible. We should not advance window so
-	 * brainlessly and we should not advertise so huge window from the very
-	 * beginning. BTW window "prediction" does not speedup anything!
-	 * SIlly, silly, silly.
-	 */
+	tp->ack.quick = 0;
 
 	if(atomic_read(&sk->rmem_alloc) < (sk->rcvbuf << 1))
 		return 0;
@@ -2525,10 +2505,14 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				NET_INC_STATS_BH(TCPHPHitsToUser);
 
+				__set_current_state(TASK_RUNNING);
+
 				if (tcp_copy_to_iovec(sk, skb, tcp_header_len))
 					goto csum_error;
 
 				__skb_pull(skb,tcp_header_len);
+
+				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 			} else {
 				if (tcp_checksum_complete_user(sk, skb))
 					goto csum_error;
@@ -2548,15 +2532,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				 */
 				__skb_queue_tail(&sk->receive_queue, skb);
 				skb_set_owner_r(skb, sk);
-			}
 
-			tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 
-			/* FIN bit check is not done since if FIN is set in
-			 * this frame, the pred_flags won't match up. -DaveM
-			 */
-			wake_up_interruptible(sk->sleep);
-			sock_wake_async(sk->socket,1, POLL_IN);
+				/* FIN bit check is not done since if FIN is set in
+				 * this frame, the pred_flags won't match up. -DaveM
+				 */
+				wake_up_interruptible(sk->sleep);
+				sock_wake_async(sk->socket,1, POLL_IN);
+			}
 
 			tcp_event_data_recv(tp, skb);
@@ -3467,7 +3451,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			int tmo;
 
 			if (tp->linger2 < 0 ||
-			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
+			    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+			     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 				tcp_done(sk);
 				return 1;
 			}
@@ -3526,7 +3511,8 @@ step6:
 	 *	BSD 4.4 also does reset.
 	 */
 	if (sk->shutdown & RCV_SHUTDOWN) {
-		if (after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
+		if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
 			tcp_reset(sk);
 			return 1;
 		}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7420e268f..e54ce2ec2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,7 +5,7 @@
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
- * Version:	$Id: tcp_ipv4.c,v 1.197 2000/01/21 06:37:28 davem Exp $
+ * Version:	$Id: tcp_ipv4.c,v 1.198 2000/01/31 01:21:20 davem Exp $
 *
 *		IPv4 specific functions
 *
@@ -598,6 +598,7 @@ unique:
 	*skp = sk;
 	sk->pprev = skp;
+	sk->hashent = hash;
 	sock_prot_inc_use(sk->prot);
 	write_unlock_bh(&head->lock);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d6bc8a205..5583ea6cb 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -5,7 +5,7 @@
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
- * Version:	$Id: tcp_output.c,v 1.119 2000/01/19 04:06:15 davem Exp $
+ * Version:	$Id: tcp_output.c,v 1.120 2000/01/31 01:21:22 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -1005,7 +1005,7 @@ int tcp_connect(struct sock *sk, struct sk_buff *buff)
 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
 
 	/* Reserve space for headers. */
-	skb_reserve(buff, MAX_TCP_HEADER + 15);
+	skb_reserve(buff, MAX_TCP_HEADER);
 
 	/* We'll fix this up when we get a response from the other end.
 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 873d22c3d..9f13435fa 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -7,7 +7,7 @@
 *
 *	Based on linux/net/ipv4/ip_sockglue.c
 *
- *	$Id: ipv6_sockglue.c,v 1.31 2000/01/16 05:11:38 davem Exp $
+ *	$Id: ipv6_sockglue.c,v 1.32 2000/01/31 01:21:25 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
@@ -183,6 +183,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
 			local_bh_enable();
 			sk->prot = &udp_prot;
 			sk->socket->ops = &inet_dgram_ops;
+			sk->family = PF_INET;
 		}
 		opt = xchg(&np->opt, NULL);
 		if (opt)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 420b81f4a..47dcf8ce0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -5,7 +5,7 @@
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
- *	$Id: tcp_ipv6.c,v 1.118 2000/01/18 08:24:22 davem Exp $
+ *	$Id: tcp_ipv6.c,v 1.119 2000/01/31 01:21:26 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
@@ -450,6 +450,7 @@ unique:
 	*skp = sk;
 	sk->pprev = skp;
+	sk->hashent = hash;
 	sock_prot_inc_use(sk->prot);
 	write_unlock_bh(&head->lock);
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 0e272947d..d1d89b1e4 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -6,7 +6,7 @@
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Sat Oct  9 09:22:27 1999
- * Modified at:   Wed Jan  5 14:17:16 2000
+ * Modified at:   Sun Jan 23 17:41:24 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved.
@@ -55,7 +55,8 @@
 #include <net/irda/wrapper.h>
 
 extern int irtty_init(void);
-extern int pc87108_init(void);
+extern int nsc_ircc_init(void);
+extern int ircc_init(void);
 extern int w83977af_init(void);
 extern int esi_init(void);
 extern int tekram_init(void);
@@ -121,7 +122,7 @@ int __init irda_device_init( void)
 	w83977af_init();
 #endif
 #ifdef CONFIG_NSC_FIR
-	pc87108_init();
+	nsc_ircc_init();
 #endif
 #ifdef CONFIG_TOSHIBA_FIR
 	toshoboe_init();
@@ -144,9 +145,6 @@ int __init irda_device_init( void)
 #ifdef CONFIG_LITELINK_DONGLE
 	litelink_init();
 #endif
-#ifdef CONFIG_AIRPORT_DONGLE
-	airport_init();
-#endif
 #ifdef CONFIG_OLD_BELKIN
 	old_belkin_init();
 #endif
diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c
index 13e4d0465..623328af1 100644
--- a/net/irda/wrapper.c
+++ b/net/irda/wrapper.c
@@ -6,12 +6,12 @@
 * Status:        Stable
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Mon Aug  4 20:40:53 1997
- * Modified at:   Sun Dec 12 13:46:40 1999
+ * Modified at:   Fri Jan 28 13:21:09 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Modified at:   Fri May 28  3:11 CST 1999
 * Modified by:   Horst von Brand <vonbrand@sleipnir.valparaiso.cl>
 *
- *     Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
+ *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>,
 *     All Rights Reserved.
 *
 *     This program is free software; you can redistribute it and/or
@@ -41,19 +41,19 @@
 static inline int  stuff_byte(__u8 byte, __u8 *buf);
 
 static void state_outside_frame(struct net_device *dev,
 				struct net_device_stats *stats,
-				struct iobuff_t *rx_buff, __u8 byte);
+				iobuff_t *rx_buff, __u8 byte);
 static void state_begin_frame(struct net_device *dev,
 			      struct net_device_stats *stats,
-			      struct iobuff_t *rx_buff, __u8 byte);
+			      iobuff_t *rx_buff, __u8 byte);
 static void state_link_escape(struct net_device *dev,
 			      struct net_device_stats *stats,
-			      struct iobuff_t *rx_buff, __u8 byte);
+			      iobuff_t *rx_buff, __u8 byte);
 static void state_inside_frame(struct net_device *dev,
 			       struct net_device_stats *stats,
-			       struct iobuff_t *rx_buff, __u8 byte);
+			       iobuff_t *rx_buff, __u8 byte);
 
 static void (*state[])(struct net_device *dev, struct net_device_stats *stats,
-		       struct iobuff_t *rx_buff, __u8 byte) =
+		       iobuff_t *rx_buff, __u8 byte) =
 {
 	state_outside_frame,
 	state_begin_frame,
@@ -180,7 +180,7 @@ inline void async_bump(struct net_device *dev, struct net_device_stats *stats,
 		return;
 	}
 
-	/* Align IP header to 20 bytes */
+	/* Align IP header to 20 bytes */
 	skb_reserve(skb, 1);
 
 	/* Copy data without CRC */
@@ -205,7 +205,7 @@ inline void async_bump(struct net_device *dev, struct net_device_stats *stats,
 */
 inline void async_unwrap_char(struct net_device *dev,
 			      struct net_device_stats *stats,
-			      struct iobuff_t *rx_buff, __u8 byte)
+			      iobuff_t *rx_buff, __u8 byte)
 {
 	(*state[rx_buff->state])(dev, stats, rx_buff, byte);
 }
@@ -218,7 +218,7 @@ inline void async_unwrap_char(struct net_device *dev,
 */
 static void state_outside_frame(struct net_device *dev,
 				struct net_device_stats *stats,
-				struct iobuff_t *rx_buff, __u8 byte)
+				iobuff_t *rx_buff, __u8 byte)
 {
 	switch (byte) {
 	case BOF:
@@ -245,7 +245,7 @@ static void state_outside_frame(struct net_device *dev,
 */
 static void state_begin_frame(struct net_device *dev,
 			      struct net_device_stats *stats,
-			      struct iobuff_t *rx_buff, __u8 byte)
+			      iobuff_t *rx_buff, __u8 byte)
 {
 	/* Time to initialize receive buffer */
 	rx_buff->data = rx_buff->head;
@@ -276,14 +276,14 @@ static void state_begin_frame(struct net_device *dev,
 }
 
 /*
- * Function state_link_escape (idev, byte)
+ * Function state_link_escape (dev, byte)
 *
 *    Found link escape character
 *
 */
 static void state_link_escape(struct net_device *dev,
 			      struct net_device_stats *stats,
-			      struct iobuff_t *rx_buff, __u8 byte)
+			      iobuff_t *rx_buff, __u8 byte)
 {
 	switch (byte) {
 	case BOF: /* New frame? */
@@ -315,14 +315,14 @@ static void state_link_escape(struct net_device *dev,
 }
 
 /*
- * Function state_inside_frame (idev, byte)
+ * Function state_inside_frame (dev, byte)
 *
 *    Handle bytes received within a frame
 *
 */
 static void state_inside_frame(struct net_device *dev,
 			       struct net_device_stats *stats,
-			       struct iobuff_t *rx_buff, __u8 byte)
+			       iobuff_t *rx_buff, __u8 byte)
 {
 	int ret = 0;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index f378632ac..acdea4c9c 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -978,7 +978,7 @@ static int nr_sendmsg(struct socket *sock, struct msghdr *msg, int len, struct s
 	unsigned char *asmptr;
 	int size;
 
-	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR))
+	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR))
 		return -EINVAL;
 
 	if (sk->zapped)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index e43573991..93f1a40ab 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1010,7 +1010,7 @@ static int rose_sendmsg(struct socket *sock, struct msghdr *msg, int len,
 	unsigned char *asmptr;
 	int n, size, qbit = 0;
 
-	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR))
+	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR))
 		return -EINVAL;
 
 	if (sk->zapped)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index bb98653e1..1c1a48582 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -1,6 +1,6 @@
 /* net/sched/sch_dsmark.c - Differentiated Services field marker */
 
-/* Written 1998,1999 by Werner Almesberger, EPFL ICA */
+/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
 
 
 #include <linux/config.h>
@@ -82,7 +82,7 @@ static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
 
 static unsigned long dsmark_get(struct Qdisc *sch,u32 classid)
 {
-	struct dsmark_qdisc_data *p = PRIV(sch);
+	struct dsmark_qdisc_data *p __attribute__((unused)) = PRIV(sch);
 
 	DPRINTK("dsmark_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid);
 	return TC_H_MIN(classid)+1;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index a9f05e20f..e463b33ca 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -10,9 +10,10 @@
 * Authors:    J Hadi Salim (hadi@nortelnetworks.com) 1998,1999
 *
 * 991129: -  Bug fix with grio mode
- *	      - a better sing. AvgQ mode with Grio
+ *	      - a better sing. AvgQ mode with Grio(WRED)
 *	      - A finer grained VQ dequeue based on sugestion
 *	        from Ren Liu
+ *	      - More error checks
 *
 *
@@ -134,17 +135,10 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		"general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
 		sch->stats.backlog);
 	/* sum up all the qaves of prios <= to ours to get the new qave*/
-	if (t->grio) {
+	if (!t->eqp && t->grio) {
 		for (i=0;i<t->DPs;i++) {
 			if ((!t->tab[i]) || (i==q->DP))
 				continue;
-			if (t->tab[i]->prio == q->prio ){
-				qave=0;
-				t->eqp=1;
-				q->qave=t->tab[t->def]->qave;
-				q->qidlestart=t->tab[t->def]->qidlestart;
-				break;
-			}
 			if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
 				qave +=t->tab[i]->qave;
@@ -155,6 +149,12 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	q->packetsin++;
 	q->bytesin+=skb->len;
 
+	if (t->eqp && t->grio) {
+		qave=0;
+		q->qave=t->tab[t->def]->qave;
+		q->qidlestart=t->tab[t->def]->qidlestart;
+	}
+
 	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
 		long us_idle;
 		PSCHED_GET_TIME(now);
@@ -163,7 +163,12 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
 	} else {
-		q->qave += q->backlog - (q->qave >> q->Wlog);
+		if (t->eqp) {
+			q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
+		} else {
+			q->qave += q->backlog - (q->qave >> q->Wlog);
+		}
+
 	}
@@ -232,18 +237,22 @@ gred_dequeue(struct Qdisc* sch)
 
 	skb = __skb_dequeue(&sch->q);
 	if (skb) {
-		q= t->tab[(skb->tc_index&0xf)];
 		sch->stats.backlog -= skb->len;
-		q->backlog -= skb->len;
-		if (!q->backlog && !t->eqp)
-			PSCHED_GET_TIME(q->qidlestart);
+		q= t->tab[(skb->tc_index&0xf)];
+		if (q) {
+			q->backlog -= skb->len;
+			if (!q->backlog && !t->eqp)
+				PSCHED_GET_TIME(q->qidlestart);
+		} else {
+			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
+		}
 		return skb;
 	}
 
 	if (t->eqp) {
 		q= t->tab[t->def];
 		if (!q)
-			printk("no default VQ set: Results will be "
+			D2PRINTK("no default VQ set: Results will be "
			       "screwed up\n");
 		else
 			PSCHED_GET_TIME(q->qidlestart);
@@ -256,31 +265,37 @@ static int
 gred_drop(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
-	int i;
 
 	struct gred_sched_data *q;
 	struct gred_sched *t= (struct gred_sched *)sch->data;
 
 	skb = __skb_dequeue_tail(&sch->q);
 	if (skb) {
-		q= t->tab[(skb->tc_index&0xf)];
 		sch->stats.backlog -= skb->len;
 		sch->stats.drops++;
-		q->backlog -= skb->len;
-		q->other++;
+		q= t->tab[(skb->tc_index&0xf)];
+		if (q) {
+			q->backlog -= skb->len;
+			q->other++;
+			if (!q->backlog && !t->eqp)
+				PSCHED_GET_TIME(q->qidlestart);
+		} else {
+			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
+		}
+
 		kfree_skb(skb);
 		return 1;
 	}
 
-/* could probably do it for a single VQ before freeing the skb */
-	for (i=0;i<t->DPs;i++) {
-		q= t->tab[i];
-		if (!q)
-			continue;
-		PSCHED_GET_TIME(q->qidlestart);
-	}
+	q=t->tab[t->def];
+	if (!q) {
+		D2PRINTK("no default VQ set: Results might be screwed up\n");
+		return 0;
+	}
+
+	PSCHED_GET_TIME(q->qidlestart);
 
 	return 0;
+
 }
 
 static void gred_reset(struct Qdisc* sch)
@@ -295,7 +310,6 @@ static void gred_reset(struct Qdisc* sch)
 		kfree_skb(skb);
 
 	sch->stats.backlog = 0;
-/* could probably do it for a single VQ before freeing the skb */
 	for (i=0;i<t->DPs;i++) {
 		q= t->tab[i];
 		if (!q)
@@ -319,6 +333,7 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt)
 	struct tc_gred_sopt *sopt;
 	struct rtattr *tb[TCA_GRED_STAB];
 	struct rtattr *tb2[TCA_GRED_STAB];
+	int i;
 
 	if (opt == NULL ||
 		rtattr_parse(tb, TCA_GRED_STAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) )
@@ -340,13 +355,13 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt)
 	}
 
-	if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
+	if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
 		RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
 		RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
-			return -EINVAL;
+		return -EINVAL;
 
-	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
-	if (ctl->DP > MAX_DPs-1 || ctl->DP <0) {
+	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
+	if (ctl->DP > MAX_DPs-1 || ctl->DP <0) {
 		/* misbehaving is punished! Put in the default drop probability */
 		DPRINTK("\nGRED: DP %u not in the proper range fixed. New DP "
 			"set to default at %d\n",ctl->DP,table->def);
@@ -356,8 +371,8 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt)
 	if (table->tab[ctl->DP] == NULL) {
 		table->tab[ctl->DP]=kmalloc(sizeof(struct gred_sched_data),
 					    GFP_KERNEL);
-		memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
-	}
+		memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
+	}
 	q= table->tab[ctl->DP];
 
 	if (table->grio) {
@@ -400,6 +415,19 @@ static int gred_change(struct Qdisc *sch, struct rtattr *opt)
 	PSCHED_SET_PASTPERFECT(q->qidlestart);
 	memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
 
+	if ( table->initd && table->grio) {
+	/* this looks ugly but its not in the fast path */
+		for (i=0;i<table->DPs;i++) {
+			if ((!table->tab[i]) || (i==q->DP) )
+				continue;
+			if (table->tab[i]->prio == q->prio ){
+				/* WRED mode detected */
+				table->eqp=1;
+				break;
+			}
+		}
+	}
+
 	if (!table->initd) {
 		table->initd=1;
 		/*