| author | Ralf Baechle <ralf@linux-mips.org> | 1999-01-04 16:03:48 +0000 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 1999-01-04 16:03:48 +0000 |
| commit | 78c388aed2b7184182c08428db1de6c872d815f5 (patch) | |
| tree | 4b2003b1b4ceb241a17faa995da8dd1004bb8e45 /include/net/sock.h | |
| parent | eb7a5bf93aaa4be1d7c6181100ab7639e74d67f7 (diff) | |
Merge with Linux 2.1.131 and more MIPS goodies.
(Did I mention that CVS is buggy ...)
Diffstat (limited to 'include/net/sock.h')
-rw-r--r-- | include/net/sock.h | 24 |
1 file changed, 7 insertions, 17 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index ad27511c2..a9cc39260 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -139,6 +139,7 @@ struct ipv6_pinfo {
 	struct in6_addr		*daddr_cache;
 
 	__u32			flow_lbl;
+	__u32			frag_size;
 	int			hop_limit;
 	int			mcast_hops;
 	int			mcast_oif;
@@ -159,7 +160,9 @@ struct ipv6_pinfo {
 	} rxopt;
 
 	/* sockopt flags */
-	__u8			mc_loop:1;
+	__u8			mc_loop:1,
+				recverr:1,
+				pmtudisc:2;
 
 	struct ipv6_mc_socklist	*ipv6_mc_list;
 	__u32			dst_cookie;
@@ -238,6 +241,7 @@ struct tcp_opt {
  *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
  */
 	__u32	snd_ssthresh;	/* Slow start size threshold	*/
+	__u16	snd_cwnd_cnt;	/* Linear increase counter	*/
 	__u8	dup_acks;	/* Consequetive duplicate acks seen from other end */
 	__u8	delayed_acks;
 	__u16	user_mss;	/* mss requested by user in ioctl */
@@ -531,7 +535,7 @@ struct proto {
 	struct sock		*sklist_prev;
 
 	void			(*close)(struct sock *sk,
-					unsigned long timeout);
+					long timeout);
 	int			(*connect)(struct sock *sk,
 					struct sockaddr *uaddr,
 					int addr_len);
@@ -841,20 +845,6 @@ extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
-extern __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
-		return -ENOMEM;
-	skb_set_owner_r(skb, sk);
-	__skb_queue_tail(&sk->receive_queue,skb);
-	if (!sk->dead)
-		sk->data_ready(sk,skb->len);
-	return 0;
-}
-
 extern __inline__ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
 	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
@@ -863,7 +853,7 @@ extern __inline__ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
 		return -ENOMEM;
 	skb_set_owner_r(skb, sk);
-	__skb_queue_tail(&sk->error_queue,skb);
+	skb_queue_tail(&sk->error_queue,skb);
 	if (!sk->dead)
 		sk->data_ready(sk,skb->len);
 	return 0;
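The new recverr:1 and pmtudisc:2 bits in struct ipv6_pinfo look like the per-socket state behind the IPv6 error-queue and path-MTU-discovery socket options. Assuming that is what they back (the hunk only shows the storage, not the setsockopt handling), user space would flip them roughly as below; the option names are the standard Linux ones, not something taken from this commit.

```c
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask for ICMPv6 errors to be queued on the socket's error queue
	 * (presumably what the recverr bit records). */
	int on = 1;
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on)) < 0)
		perror("IPV6_RECVERR");

	/* Request full path-MTU discovery on this socket
	 * (presumably what the two-bit pmtudisc field records). */
	int pmtu = IPV6_PMTUDISC_DO;
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER, &pmtu, sizeof(pmtu)) < 0)
		perror("IPV6_MTU_DISCOVER");

	return 0;
}
```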
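snd_cwnd_cnt, described in the hunk as a linear increase counter, is the piece of state classic AIMD congestion avoidance needs once the window has passed snd_ssthresh: instead of opening the window on every ACK as in slow start, the counter accumulates ACKs and the window grows by one packet only after a full window's worth has been acknowledged. A minimal user-space sketch of that pattern follows; the struct and function are stand-ins, not the kernel's own congestion-avoidance code.

```c
#include <stdint.h>

/* Illustrative stand-ins for the tcp_opt fields touched by this hunk. */
struct cwnd_state {
	uint32_t snd_cwnd;	/* congestion window, in packets */
	uint32_t snd_ssthresh;	/* slow start threshold */
	uint16_t snd_cwnd_cnt;	/* linear increase counter */
};

/* Called once per ACK that covers new data. */
static void cong_avoid(struct cwnd_state *s)
{
	if (s->snd_cwnd <= s->snd_ssthresh) {
		/* Slow start: grow by one packet per ACK (exponential per RTT). */
		s->snd_cwnd++;
	} else if (++s->snd_cwnd_cnt >= s->snd_cwnd) {
		/* Congestion avoidance: the counter accumulates ACKs until a
		 * full window has been acknowledged, then the window grows by
		 * one packet, i.e. roughly one extra packet per RTT. */
		s->snd_cwnd++;
		s->snd_cwnd_cnt = 0;
	}
}
```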
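The final hunk replaces the unlocked __skb_queue_tail() with skb_queue_tail() when an skb is appended to sk->error_queue. The usual convention is that the double-underscore list helpers leave serialisation to the caller, while the plain ones wrap the same operation in the queue's own protection, so this reads as a fix for a queueing path that may run without that protection already held. A generic sketch of the wrapper pattern, using hypothetical names rather than the 2.1.x skbuff internals:

```c
#include <pthread.h>
#include <stddef.h>

/* Hypothetical singly linked packet, standing in for struct sk_buff. */
struct pkt {
	struct pkt *next;
};

/* Hypothetical queue head, standing in for struct sk_buff_head. */
struct pkt_queue {
	struct pkt *head, *tail;
	pthread_mutex_t lock;
};

/* Unlocked variant: the caller must already hold q->lock
 * (the role __skb_queue_tail() plays for sk_buff queues). */
static void __pkt_queue_tail(struct pkt_queue *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

/* Locked variant: serialises against concurrent callers itself
 * (the role skb_queue_tail() plays). */
static void pkt_queue_tail(struct pkt_queue *q, struct pkt *p)
{
	pthread_mutex_lock(&q->lock);
	__pkt_queue_tail(q, p);
	pthread_mutex_unlock(&q->lock);
}
```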