diff options
author | Ralf Baechle <ralf@linux-mips.org> | 2000-10-05 01:18:40 +0000 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-10-05 01:18:40 +0000 |
commit | 012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch) | |
tree | 87efc733f9b164e8c85c0336f92c8fb7eff6d183 /include/net | |
parent | 625a1589d3d6464b5d90b8a0918789e3afffd220 (diff) |
Merge with Linux 2.4.0-test9. Please check DECstation, I had a number
of rejects to fix up while integrating Linus' patches. I also found
that this kernel will only boot SMP on Origin; the UP kernel freezes
soon after bootup with SCSI timeout messages. I commit this anyway
since I found that the last CVS versions had the same problem.
Diffstat (limited to 'include/net')
-rw-r--r-- | include/net/addrconf.h | 18 | ||||
-rw-r--r-- | include/net/checksum.h | 2 | ||||
-rw-r--r-- | include/net/dn_route.h | 6 | ||||
-rw-r--r-- | include/net/dsfield.h | 10 | ||||
-rw-r--r-- | include/net/dst.h | 16 | ||||
-rw-r--r-- | include/net/if_inet6.h | 4 | ||||
-rw-r--r-- | include/net/ip.h | 12 | ||||
-rw-r--r-- | include/net/ip6_fib.h | 4 | ||||
-rw-r--r-- | include/net/ip6_route.h | 2 | ||||
-rw-r--r-- | include/net/ip_fib.h | 18 | ||||
-rw-r--r-- | include/net/ipv6.h | 14 | ||||
-rw-r--r-- | include/net/ndisc.h | 2 | ||||
-rw-r--r-- | include/net/neighbour.h | 16 | ||||
-rw-r--r-- | include/net/pkt_cls.h | 2 | ||||
-rw-r--r-- | include/net/pkt_sched.h | 18 | ||||
-rw-r--r-- | include/net/profile.h | 32 | ||||
-rw-r--r-- | include/net/route.h | 20 | ||||
-rw-r--r-- | include/net/snmp.h | 53 | ||||
-rw-r--r-- | include/net/sock.h | 61 | ||||
-rw-r--r-- | include/net/tcp.h | 64 | ||||
-rw-r--r-- | include/net/x25.h | 2 |
21 files changed, 198 insertions, 178 deletions
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 6a5b87ee6..465b452ae 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -87,7 +87,7 @@ extern int ipv6_chk_mcast_addr(struct net_device *dev, extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len); -extern __inline__ struct inet6_dev * +static inline struct inet6_dev * __in6_dev_get(struct net_device *dev) { return (struct inet6_dev *)dev->ip6_ptr; @@ -95,7 +95,7 @@ __in6_dev_get(struct net_device *dev) extern rwlock_t addrconf_lock; -extern __inline__ struct inet6_dev * +static inline struct inet6_dev * in6_dev_get(struct net_device *dev) { struct inet6_dev *idev = NULL; @@ -109,7 +109,7 @@ in6_dev_get(struct net_device *dev) extern void in6_dev_finish_destroy(struct inet6_dev *idev); -extern __inline__ void +static inline void in6_dev_put(struct inet6_dev *idev) { if (atomic_dec_and_test(&idev->refcnt)) @@ -122,7 +122,7 @@ in6_dev_put(struct inet6_dev *idev) extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); -extern __inline__ void in6_ifa_put(struct inet6_ifaddr *ifp) +static inline void in6_ifa_put(struct inet6_ifaddr *ifp) { if (atomic_dec_and_test(&ifp->refcnt)) inet6_ifa_finish_destroy(ifp); @@ -157,7 +157,7 @@ static __inline__ u8 ipv6_addr_hash(struct in6_addr *addr) * compute link-local solicited-node multicast address */ -extern __inline__ void addrconf_addr_solict_mult_old(struct in6_addr *addr, +static inline void addrconf_addr_solict_mult_old(struct in6_addr *addr, struct in6_addr *solicited) { ipv6_addr_set(solicited, @@ -165,7 +165,7 @@ extern __inline__ void addrconf_addr_solict_mult_old(struct in6_addr *addr, __constant_htonl(0x1), addr->s6_addr32[3]); } -extern __inline__ void addrconf_addr_solict_mult_new(struct in6_addr *addr, +static inline void addrconf_addr_solict_mult_new(struct in6_addr *addr, struct in6_addr *solicited) { ipv6_addr_set(solicited, @@ -175,21 +175,21 @@ extern __inline__ void 
addrconf_addr_solict_mult_new(struct in6_addr *addr, } -extern __inline__ void ipv6_addr_all_nodes(struct in6_addr *addr) +static inline void ipv6_addr_all_nodes(struct in6_addr *addr) { ipv6_addr_set(addr, __constant_htonl(0xFF020000), 0, 0, __constant_htonl(0x1)); } -extern __inline__ void ipv6_addr_all_routers(struct in6_addr *addr) +static inline void ipv6_addr_all_routers(struct in6_addr *addr) { ipv6_addr_set(addr, __constant_htonl(0xFF020000), 0, 0, __constant_htonl(0x2)); } -extern __inline__ int ipv6_addr_is_multicast(struct in6_addr *addr) +static inline int ipv6_addr_is_multicast(struct in6_addr *addr) { return (addr->s6_addr32[0] & __constant_htonl(0xFF000000)) == __constant_htonl(0xFF000000); } diff --git a/include/net/checksum.h b/include/net/checksum.h index 6793f196f..76cf27e77 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -93,7 +93,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, #endif #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER -extern __inline__ +static inline unsigned int csum_and_copy_from_user (const char *src, char *dst, int len, int sum, int *err_ptr) { diff --git a/include/net/dn_route.h b/include/net/dn_route.h index 7c7d3dd0e..30ec0d245 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -91,12 +91,12 @@ extern void dn_route_cleanup(void); #include <net/sock.h> #include <linux/if_arp.h> -extern __inline__ void dn_rt_send(struct sk_buff *skb) +static inline void dn_rt_send(struct sk_buff *skb) { dev_queue_xmit(skb); } -extern __inline__ void dn_rt_finish_output(struct sk_buff *skb, char *dst) +static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst) { struct net_device *dev = skb->dev; @@ -110,7 +110,7 @@ extern __inline__ void dn_rt_finish_output(struct sk_buff *skb, char *dst) kfree_skb(skb); } -extern __inline__ void dn_nsp_send(struct sk_buff *skb) +static inline void dn_nsp_send(struct sk_buff *skb) { struct sock *sk = skb->sk; struct dn_scp *scp = 
&sk->protinfo.dn; diff --git a/include/net/dsfield.h b/include/net/dsfield.h index 778b6baea..80bb84c7c 100644 --- a/include/net/dsfield.h +++ b/include/net/dsfield.h @@ -12,19 +12,19 @@ #include <asm/byteorder.h> -extern __inline__ __u8 ipv4_get_dsfield(struct iphdr *iph) +static inline __u8 ipv4_get_dsfield(struct iphdr *iph) { return iph->tos; } -extern __inline__ __u8 ipv6_get_dsfield(struct ipv6hdr *ipv6h) +static inline __u8 ipv6_get_dsfield(struct ipv6hdr *ipv6h) { return ntohs(*(__u16 *) ipv6h) >> 4; } -extern __inline__ void ipv4_change_dsfield(struct iphdr *iph,__u8 mask, +static inline void ipv4_change_dsfield(struct iphdr *iph,__u8 mask, __u8 value) { __u32 check = ntohs(iph->check); @@ -40,7 +40,7 @@ extern __inline__ void ipv4_change_dsfield(struct iphdr *iph,__u8 mask, } -extern __inline__ void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask, +static inline void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask, __u8 value) { __u16 tmp; @@ -53,7 +53,7 @@ extern __inline__ void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask, #if 0 /* put this later into asm-i386 or such ... 
*/ -extern __inline__ void ip_change_dsfield(struct iphdr *iph,__u16 dsfield) +static inline void ip_change_dsfield(struct iphdr *iph,__u16 dsfield) { __u16 check; diff --git a/include/net/dst.h b/include/net/dst.h index 253d72a22..d81eb07bb 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -88,12 +88,12 @@ struct dst_ops #ifdef __KERNEL__ -extern __inline__ void dst_hold(struct dst_entry * dst) +static inline void dst_hold(struct dst_entry * dst) { atomic_inc(&dst->__refcnt); } -extern __inline__ +static inline struct dst_entry * dst_clone(struct dst_entry * dst) { if (dst) @@ -101,7 +101,7 @@ struct dst_entry * dst_clone(struct dst_entry * dst) return dst; } -extern __inline__ +static inline void dst_release(struct dst_entry * dst) { if (dst) @@ -112,7 +112,7 @@ extern void * dst_alloc(struct dst_ops * ops); extern void __dst_free(struct dst_entry * dst); extern void dst_destroy(struct dst_entry * dst); -extern __inline__ +static inline void dst_free(struct dst_entry * dst) { if (dst->obsolete > 1) @@ -124,27 +124,27 @@ void dst_free(struct dst_entry * dst) __dst_free(dst); } -extern __inline__ void dst_confirm(struct dst_entry *dst) +static inline void dst_confirm(struct dst_entry *dst) { if (dst) neigh_confirm(dst->neighbour); } -extern __inline__ void dst_negative_advice(struct dst_entry **dst_p) +static inline void dst_negative_advice(struct dst_entry **dst_p) { struct dst_entry * dst = *dst_p; if (dst && dst->ops->negative_advice) *dst_p = dst->ops->negative_advice(dst); } -extern __inline__ void dst_link_failure(struct sk_buff *skb) +static inline void dst_link_failure(struct sk_buff *skb) { struct dst_entry * dst = skb->dst; if (dst && dst->ops && dst->ops->link_failure) dst->ops->link_failure(skb); } -extern __inline__ void dst_set_expires(struct dst_entry *dst, int timeout) +static inline void dst_set_expires(struct dst_entry *dst, int timeout) { unsigned long expires = jiffies + timeout; diff --git a/include/net/if_inet6.h 
b/include/net/if_inet6.h index 2ea636834..921268e00 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -108,7 +108,7 @@ struct inet6_dev extern struct ipv6_devconf ipv6_devconf; -extern __inline__ void ipv6_eth_mc_map(struct in6_addr *addr, char *buf) +static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf) { /* * +-------+-------+-------+-------+-------+-------+ @@ -122,7 +122,7 @@ extern __inline__ void ipv6_eth_mc_map(struct in6_addr *addr, char *buf) memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32)); } -extern __inline__ void ipv6_tr_mc_map(struct in6_addr *addr, char *buf) +static inline void ipv6_tr_mc_map(struct in6_addr *addr, char *buf) { /* All nodes FF01::1, FF02::1, FF02::1:FFxx:xxxx */ diff --git a/include/net/ip.h b/include/net/ip.h index 45a85abfe..1ba956ac8 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -115,7 +115,7 @@ extern int ip_build_xmit(struct sock *sk, * multicast packets. */ -extern __inline__ void ip_tr_mc_map(u32 addr, char *buf) +static inline void ip_tr_mc_map(u32 addr, char *buf) { buf[0]=0xC0; buf[1]=0x00; @@ -159,7 +159,7 @@ extern int sysctl_local_port_range[2]; extern int sysctl_ip_default_ttl; #ifdef CONFIG_INET -extern __inline__ int ip_send(struct sk_buff *skb) +static inline int ip_send(struct sk_buff *skb) { if (skb->len > skb->dst->pmtu) return ip_fragment(skb, ip_finish_output); @@ -169,7 +169,7 @@ extern __inline__ int ip_send(struct sk_buff *skb) /* The function in 2.2 was invalid, producing wrong result for * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. 
--ANK(000625) */ -extern __inline__ +static inline int ip_decrease_ttl(struct iphdr *iph) { u32 check = iph->check; @@ -178,7 +178,7 @@ int ip_decrease_ttl(struct iphdr *iph) return --iph->ttl; } -extern __inline__ +static inline int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) { return (sk->protinfo.af_inet.pmtudisc == IP_PMTUDISC_DO || @@ -188,7 +188,7 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst); -extern __inline__ void ip_select_ident(struct iphdr *iph, struct dst_entry *dst) +static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst) { if (iph->frag_off&__constant_htons(IP_DF)) iph->id = 0; @@ -200,7 +200,7 @@ extern __inline__ void ip_select_ident(struct iphdr *iph, struct dst_entry *dst) * Map a multicast IP onto multicast MAC for type ethernet. */ -extern __inline__ void ip_eth_mc_map(u32 addr, char *buf) +static inline void ip_eth_mc_map(u32 addr, char *buf) { addr=ntohl(addr); buf[0]=0x01; diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index e07802554..f8d382f4e 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -96,7 +96,7 @@ struct fib6_walker_t extern struct fib6_walker_t fib6_walker_list; extern rwlock_t fib6_walker_lock; -extern __inline__ void fib6_walker_link(struct fib6_walker_t *w) +static inline void fib6_walker_link(struct fib6_walker_t *w) { write_lock_bh(&fib6_walker_lock); w->next = fib6_walker_list.next; @@ -106,7 +106,7 @@ extern __inline__ void fib6_walker_link(struct fib6_walker_t *w) write_unlock_bh(&fib6_walker_lock); } -extern __inline__ void fib6_walker_unlink(struct fib6_walker_t *w) +static inline void fib6_walker_unlink(struct fib6_walker_t *w) { write_lock_bh(&fib6_walker_lock); w->next->prev = w->prev; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index cd6097d45..2ea9eefcb 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -94,7 +94,7 @@ extern 
rwlock_t rt6_lock; * For UDP/RAW sockets this is done on udp_connect. */ -extern __inline__ void ip6_dst_store(struct sock *sk, struct dst_entry *dst, +static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, struct in6_addr *daddr) { struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index afcec35f6..804f34927 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -140,19 +140,19 @@ struct fib_table extern struct fib_table *local_table; extern struct fib_table *main_table; -extern __inline__ struct fib_table *fib_get_table(int id) +static inline struct fib_table *fib_get_table(int id) { if (id != RT_TABLE_LOCAL) return main_table; return local_table; } -extern __inline__ struct fib_table *fib_new_table(int id) +static inline struct fib_table *fib_new_table(int id) { return fib_get_table(id); } -extern __inline__ int fib_lookup(const struct rt_key *key, struct fib_result *res) +static inline int fib_lookup(const struct rt_key *key, struct fib_result *res) { if (local_table->tb_lookup(local_table, key, res) && main_table->tb_lookup(main_table, key, res)) @@ -160,7 +160,7 @@ extern __inline__ int fib_lookup(const struct rt_key *key, struct fib_result *re return 0; } -extern __inline__ void fib_select_default(const struct rt_key *key, struct fib_result *res) +static inline void fib_select_default(const struct rt_key *key, struct fib_result *res) { if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) main_table->tb_select_default(main_table, key, res); @@ -175,7 +175,7 @@ extern int fib_lookup(const struct rt_key *key, struct fib_result *res); extern struct fib_table *__fib_new_table(int id); extern void fib_rule_put(struct fib_rule *r); -extern __inline__ struct fib_table *fib_get_table(int id) +static inline struct fib_table *fib_get_table(int id) { if (id == 0) id = RT_TABLE_MAIN; @@ -183,7 +183,7 @@ extern __inline__ struct fib_table *fib_get_table(int id) return 
fib_tables[id]; } -extern __inline__ struct fib_table *fib_new_table(int id) +static inline struct fib_table *fib_new_table(int id) { if (id == 0) id = RT_TABLE_MAIN; @@ -241,7 +241,7 @@ extern u32 fib_rules_policy(u32 saddr, struct fib_result *res, unsigned *flags); extern void fib_rules_init(void); #endif -extern __inline__ void fib_combine_itag(u32 *itag, struct fib_result *res) +static inline void fib_combine_itag(u32 *itag, struct fib_result *res) { #ifdef CONFIG_NET_CLS_ROUTE #ifdef CONFIG_IP_MULTIPLE_TABLES @@ -259,13 +259,13 @@ extern __inline__ void fib_combine_itag(u32 *itag, struct fib_result *res) extern void free_fib_info(struct fib_info *fi); -extern __inline__ void fib_info_put(struct fib_info *fi) +static inline void fib_info_put(struct fib_info *fi) { if (atomic_dec_and_test(&fi->fib_clntref)) free_fib_info(fi); } -extern __inline__ void fib_res_put(struct fib_result *res) +static inline void fib_res_put(struct fib_result *res) { if (res->fi) fib_info_put(res->fi); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 235ae404d..60708b5ee 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -4,7 +4,7 @@ * Authors: * Pedro Roque <roque@di.fc.ul.pt> * - * $Id: ipv6.h,v 1.21 2000/07/07 22:29:42 davem Exp $ + * $Id: ipv6.h,v 1.22 2000/09/18 05:54:13 davem Exp $ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -169,7 +169,7 @@ extern int ipv6_flowlabel_opt(struct sock *sk, char *optval, int optlen); extern void ip6_flowlabel_init(void); extern void ip6_flowlabel_cleanup(void); -extern __inline__ void fl6_sock_release(struct ip6_flowlabel *fl) +static inline void fl6_sock_release(struct ip6_flowlabel *fl) { if (fl) atomic_dec(&fl->users); @@ -206,23 +206,23 @@ typedef int (*inet_getfrag_t) (const void *data, extern int ipv6_addr_type(struct in6_addr *addr); -extern __inline__ int ipv6_addr_scope(struct in6_addr *addr) +static inline int ipv6_addr_scope(struct 
in6_addr *addr) { return ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK; } -extern __inline__ int ipv6_addr_cmp(struct in6_addr *a1, struct in6_addr *a2) +static inline int ipv6_addr_cmp(struct in6_addr *a1, struct in6_addr *a2) { return memcmp((void *) a1, (void *) a2, sizeof(struct in6_addr)); } -extern __inline__ void ipv6_addr_copy(struct in6_addr *a1, struct in6_addr *a2) +static inline void ipv6_addr_copy(struct in6_addr *a1, struct in6_addr *a2) { memcpy((void *) a1, (void *) a2, sizeof(struct in6_addr)); } #ifndef __HAVE_ARCH_ADDR_SET -extern __inline__ void ipv6_addr_set(struct in6_addr *addr, +static inline void ipv6_addr_set(struct in6_addr *addr, __u32 w1, __u32 w2, __u32 w3, __u32 w4) { @@ -233,7 +233,7 @@ extern __inline__ void ipv6_addr_set(struct in6_addr *addr, } #endif -extern __inline__ int ipv6_addr_any(struct in6_addr *a) +static inline int ipv6_addr_any(struct in6_addr *a) { return ((a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | a->s6_addr32[3] ) == 0); diff --git a/include/net/ndisc.h b/include/net/ndisc.h index d4f99ff72..1ef3341cd 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -101,7 +101,7 @@ extern int igmp6_event_report(struct sk_buff *skb, extern void igmp6_cleanup(void); -extern __inline__ struct neighbour * ndisc_get_neigh(struct net_device *dev, struct in6_addr *addr) +static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, struct in6_addr *addr) { if (dev) diff --git a/include/net/neighbour.h b/include/net/neighbour.h index c8490705f..1a60139a3 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -212,13 +212,13 @@ extern void neigh_sysctl_unregister(struct neigh_parms *p); * Neighbour references */ -extern __inline__ void neigh_release(struct neighbour *neigh) +static inline void neigh_release(struct neighbour *neigh) { if (atomic_dec_and_test(&neigh->refcnt)) neigh_destroy(neigh); } -extern __inline__ struct neighbour * neigh_clone(struct neighbour *neigh) +static inline struct 
neighbour * neigh_clone(struct neighbour *neigh) { if (neigh) atomic_inc(&neigh->refcnt); @@ -227,23 +227,23 @@ extern __inline__ struct neighbour * neigh_clone(struct neighbour *neigh) #define neigh_hold(n) atomic_inc(&(n)->refcnt) -extern __inline__ void neigh_confirm(struct neighbour *neigh) +static inline void neigh_confirm(struct neighbour *neigh) { if (neigh) neigh->confirmed = jiffies; } -extern __inline__ int neigh_is_connected(struct neighbour *neigh) +static inline int neigh_is_connected(struct neighbour *neigh) { return neigh->nud_state&NUD_CONNECTED; } -extern __inline__ int neigh_is_valid(struct neighbour *neigh) +static inline int neigh_is_valid(struct neighbour *neigh) { return neigh->nud_state&NUD_VALID; } -extern __inline__ int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) +static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) { neigh->used = jiffies; if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) @@ -251,7 +251,7 @@ extern __inline__ int neigh_event_send(struct neighbour *neigh, struct sk_buff * return 0; } -extern __inline__ struct neighbour * +static inline struct neighbour * __neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) { struct neighbour *n = neigh_lookup(tbl, pkey, dev); @@ -263,7 +263,7 @@ __neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev return IS_ERR(n) ? NULL : n; } -extern __inline__ struct neighbour * +static inline struct neighbour * __neigh_lookup_errno(struct neigh_table *tbl, const void *pkey, struct net_device *dev) { diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index f9c80dd0f..cb2bde236 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -63,7 +63,7 @@ struct tcf_proto_ops specific classifiers. 
*/ -extern __inline__ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) +static inline int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { int err = 0; u32 protocol = skb->protocol; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 2089faac1..58aa70d2c 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -104,32 +104,32 @@ struct qdisc_rate_table int refcnt; }; -extern __inline__ void sch_tree_lock(struct Qdisc *q) +static inline void sch_tree_lock(struct Qdisc *q) { write_lock(&qdisc_tree_lock); spin_lock_bh(&q->dev->queue_lock); } -extern __inline__ void sch_tree_unlock(struct Qdisc *q) +static inline void sch_tree_unlock(struct Qdisc *q) { spin_unlock_bh(&q->dev->queue_lock); write_unlock(&qdisc_tree_lock); } -extern __inline__ void tcf_tree_lock(struct tcf_proto *tp) +static inline void tcf_tree_lock(struct tcf_proto *tp) { write_lock(&qdisc_tree_lock); spin_lock_bh(&tp->q->dev->queue_lock); } -extern __inline__ void tcf_tree_unlock(struct tcf_proto *tp) +static inline void tcf_tree_unlock(struct tcf_proto *tp) { spin_unlock_bh(&tp->q->dev->queue_lock); write_unlock(&qdisc_tree_lock); } -extern __inline__ unsigned long +static inline unsigned long cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl) { unsigned long old_cl; @@ -141,7 +141,7 @@ cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl) return old_cl; } -extern __inline__ unsigned long +static inline unsigned long __cls_set_class(unsigned long *clp, unsigned long cl) { unsigned long old_cl; @@ -401,7 +401,7 @@ extern struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr * extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p); extern int tcf_police(struct sk_buff *skb, struct tcf_police *p); -extern __inline__ void tcf_police_release(struct tcf_police *p) +static inline void tcf_police_release(struct tcf_police *p) { if (p && 
--p->refcnt == 0) tcf_police_destroy(p); @@ -433,7 +433,7 @@ int pktsched_init(void); extern int qdisc_restart(struct net_device *dev); -extern __inline__ void qdisc_run(struct net_device *dev) +static inline void qdisc_run(struct net_device *dev) { while (!netif_queue_stopped(dev) && qdisc_restart(dev)<0) @@ -443,7 +443,7 @@ extern __inline__ void qdisc_run(struct net_device *dev) /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. */ -extern __inline__ unsigned psched_mtu(struct net_device *dev) +static inline unsigned psched_mtu(struct net_device *dev) { unsigned mtu = dev->mtu; return dev->hard_header ? mtu + dev->hard_header_len : mtu; diff --git a/include/net/profile.h b/include/net/profile.h index ebba04ae9..5393f0d04 100644 --- a/include/net/profile.h +++ b/include/net/profile.h @@ -31,12 +31,12 @@ extern void net_profile_irq_adjust(struct timeval *entered, struct timeval* leav #ifdef CONFIG_X86_TSC -extern __inline__ void net_profile_stamp(struct timeval *pstamp) +static inline void net_profile_stamp(struct timeval *pstamp) { rdtsc(pstamp->tv_usec, pstamp->tv_sec); } -extern __inline__ void net_profile_accumulate(struct timeval *entered, +static inline void net_profile_accumulate(struct timeval *entered, struct timeval *leaved, struct timeval *acc) { @@ -52,7 +52,7 @@ extern __inline__ void net_profile_accumulate(struct timeval *entered, "0" (acc->tv_usec), "1" (acc->tv_sec)); } -extern __inline__ void net_profile_sub(struct timeval *sub, +static inline void net_profile_sub(struct timeval *sub, struct timeval *acc) { __asm__ __volatile__ ("subl %2,%0\n\t" @@ -62,7 +62,7 @@ extern __inline__ void net_profile_sub(struct timeval *sub, "0" (acc->tv_usec), "1" (acc->tv_sec)); } -extern __inline__ void net_profile_add(struct timeval *add, +static inline void net_profile_add(struct timeval *add, struct timeval *acc) { __asm__ __volatile__ ("addl %2,%0\n\t" @@ -80,7 +80,7 @@ extern long alpha_hi; /* On alpha cycle counter has only 
32 bits :-( :-( */ -extern __inline__ void net_profile_stamp(struct timeval *pstamp) +static inline void net_profile_stamp(struct timeval *pstamp) { __u32 result; __asm__ __volatile__ ("rpcc %0" : "r="(result)); @@ -91,7 +91,7 @@ extern __inline__ void net_profile_stamp(struct timeval *pstamp) pstamp->tv_usec = alpha_lo; } -extern __inline__ void net_profile_accumulate(struct timeval *entered, +static inline void net_profile_accumulate(struct timeval *entered, struct timeval *leaved, struct timeval *acc) { @@ -113,7 +113,7 @@ extern __inline__ void net_profile_accumulate(struct timeval *entered, acc->tv_usec = usecs; } -extern __inline__ void net_profile_sub(struct timeval *entered, +static inline void net_profile_sub(struct timeval *entered, struct timeval *leaved) { time_t usecs = leaved->tv_usec - entered->tv_usec; @@ -127,7 +127,7 @@ extern __inline__ void net_profile_sub(struct timeval *entered, leaved->tv_usec = usecs; } -extern __inline__ void net_profile_add(struct timeval *entered, struct timeval *leaved) +static inline void net_profile_add(struct timeval *entered, struct timeval *leaved) { time_t usecs = leaved->tv_usec + entered->tv_usec; time_t secs = leaved->tv_sec + entered->tv_sec; @@ -143,18 +143,18 @@ extern __inline__ void net_profile_add(struct timeval *entered, struct timeval #else -extern __inline__ void net_profile_stamp(struct timeval *pstamp) +static inline void net_profile_stamp(struct timeval *pstamp) { /* Not "fast" counterpart! On architectures without cpu clock "fast" routine is absolutely useless in this situation. 
do_gettimeofday still says something on slow-slow-slow - boxes, though it eats more cpu time than the sobject of + boxes, though it eats more cpu time than the subject of investigation :-) :-) */ do_gettimeofday(pstamp); } -extern __inline__ void net_profile_accumulate(struct timeval *entered, +static inline void net_profile_accumulate(struct timeval *entered, struct timeval *leaved, struct timeval *acc) { @@ -176,7 +176,7 @@ extern __inline__ void net_profile_accumulate(struct timeval *entered, acc->tv_usec = usecs; } -extern __inline__ void net_profile_sub(struct timeval *entered, +static inline void net_profile_sub(struct timeval *entered, struct timeval *leaved) { time_t usecs = leaved->tv_usec - entered->tv_usec; @@ -190,7 +190,7 @@ extern __inline__ void net_profile_sub(struct timeval *entered, leaved->tv_usec = usecs; } -extern __inline__ void net_profile_add(struct timeval *entered, struct timeval *leaved) +static inline void net_profile_add(struct timeval *entered, struct timeval *leaved) { time_t usecs = leaved->tv_usec + entered->tv_usec; time_t secs = leaved->tv_sec + entered->tv_sec; @@ -207,7 +207,7 @@ extern __inline__ void net_profile_add(struct timeval *entered, struct timeval #endif -extern __inline__ void net_profile_enter(struct net_profile_slot *s) +static inline void net_profile_enter(struct net_profile_slot *s) { unsigned long flags; @@ -220,7 +220,7 @@ extern __inline__ void net_profile_enter(struct net_profile_slot *s) restore_flags(flags); } -extern __inline__ void net_profile_leave_irq(struct net_profile_slot *s) +static inline void net_profile_leave_irq(struct net_profile_slot *s) { unsigned long flags; @@ -241,7 +241,7 @@ extern __inline__ void net_profile_leave_irq(struct net_profile_slot *s) restore_flags(flags); } -extern __inline__ void net_profile_leave(struct net_profile_slot *s) +static inline void net_profile_leave(struct net_profile_slot *s) { unsigned long flags; save_flags(flags); diff --git a/include/net/route.h 
b/include/net/route.h index 20536ca63..f6ce04008 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -94,12 +94,13 @@ struct ip_rt_acct extern struct ip_rt_acct *ip_rt_acct; +struct in_device; extern void ip_rt_init(void); extern void ip_rt_redirect(u32 old_gw, u32 dst, u32 new_gw, u32 src, u8 tos, struct net_device *dev); extern void ip_rt_advice(struct rtable **rp, int advice); extern void rt_cache_flush(int how); -extern int ip_route_output(struct rtable **, u32 dst, u32 src, u32 tos, int oif); +extern int ip_route_output_key(struct rtable **, const struct rt_key *key); extern int ip_route_input(struct sk_buff*, u32 dst, u32 src, u8 tos, struct net_device *devin); extern unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu); extern void ip_rt_update_pmtu(struct dst_entry *dst, unsigned mtu); @@ -111,8 +112,17 @@ extern int ip_rt_ioctl(unsigned int cmd, void *arg); extern void ip_rt_get_source(u8 *src, struct rtable *rt); extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb); +/* Deprecated: use ip_route_output_key directly */ +static inline int ip_route_output(struct rtable **rp, + u32 daddr, u32 saddr, u32 tos, int oif) +{ + struct rt_key key = { dst:daddr, src:saddr, oif:oif, tos:tos }; + + return ip_route_output_key(rp, &key); +} + -extern __inline__ void ip_rt_put(struct rtable * rt) +static inline void ip_rt_put(struct rtable * rt) { if (rt) dst_release(&rt->u.dst); @@ -127,12 +137,12 @@ extern __inline__ void ip_rt_put(struct rtable * rt) extern __u8 ip_tos2prio[16]; -extern __inline__ char rt_tos2priority(u8 tos) +static inline char rt_tos2priority(u8 tos) { return ip_tos2prio[IPTOS_TOS(tos)>>1]; } -extern __inline__ int ip_route_connect(struct rtable **rp, u32 dst, u32 src, u32 tos, int oif) +static inline int ip_route_connect(struct rtable **rp, u32 dst, u32 src, u32 tos, int oif) { int err; err = ip_route_output(rp, dst, src, tos, oif); @@ -147,7 +157,7 @@ extern __inline__ int ip_route_connect(struct 
rtable **rp, u32 dst, u32 src, u32 extern void rt_bind_peer(struct rtable *rt, int create); -extern __inline__ struct inet_peer *rt_get_peer(struct rtable *rt) +static inline struct inet_peer *rt_get_peer(struct rtable *rt) { if (rt->peer) return rt->peer; diff --git a/include/net/snmp.h b/include/net/snmp.h index 2bd127299..03b0dfa99 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -14,17 +14,34 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * + * $Id: snmp.h,v 1.17 2000/09/21 01:31:50 davem Exp $ + * */ #ifndef _SNMP_H #define _SNMP_H + +#include <linux/cache.h> /* * We use all unsigned longs. Linux will soon be so reliable that even these * will rapidly get too small 8-). Seriously consider the IpInReceives count * on the 20Gb/s + networks people expect in a few years time! */ - + +/* + * The rule for padding: + * Best is power of two because then the right structure can be found by a simple + * shift. The structure should be always cache line aligned. + * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add instructions + * to emulate multiply in case it is not power-of-two. Currently n is always <=3 for + * all sizes so simple cache line alignment is enough. 
+ * + * The best solution would be a global CPU local area , especially on 64 and 128byte + * cacheline machine it makes a *lot* of sense -AK + */ + + struct ip_mib { unsigned long IpInReceives; @@ -44,8 +61,8 @@ struct ip_mib unsigned long IpFragOKs; unsigned long IpFragFails; unsigned long IpFragCreates; - unsigned long __pad[32-19]; -}; + unsigned long __pad[0]; +} ____cacheline_aligned; struct ipv6_mib { @@ -71,8 +88,8 @@ struct ipv6_mib unsigned long Ip6FragCreates; unsigned long Ip6InMcastPkts; unsigned long Ip6OutMcastPkts; - unsigned long __pad[32-22]; -}; + unsigned long __pad[0]; +} ____cacheline_aligned; struct icmp_mib { @@ -102,8 +119,8 @@ struct icmp_mib unsigned long IcmpOutTimestampReps; unsigned long IcmpOutAddrMasks; unsigned long IcmpOutAddrMaskReps; - unsigned long __pad[32-26]; -}; + unsigned long __pad[0]; +} ____cacheline_aligned; struct icmpv6_mib { @@ -140,8 +157,8 @@ struct icmpv6_mib unsigned long Icmp6OutRedirects; unsigned long Icmp6OutGroupMembResponses; unsigned long Icmp6OutGroupMembReductions; - unsigned long __pad[32-28]; -}; + unsigned long __pad[0]; +} ____cacheline_aligned; struct tcp_mib { @@ -159,8 +176,8 @@ struct tcp_mib unsigned long TcpRetransSegs; unsigned long TcpInErrs; unsigned long TcpOutRsts; - unsigned long __pad[16-14]; -}; + unsigned long __pad[0]; +} ____cacheline_aligned; struct udp_mib { @@ -168,8 +185,8 @@ struct udp_mib unsigned long UdpNoPorts; unsigned long UdpInErrors; unsigned long UdpOutDatagrams; - unsigned long __pad[0]; -}; + unsigned long __pad[0]; +} ____cacheline_aligned; struct linux_mib { @@ -237,9 +254,15 @@ struct linux_mib unsigned long TCPAbortOnLinger; unsigned long TCPAbortFailed; unsigned long TCPMemoryPressures; - unsigned long __pad[64-64]; -}; + unsigned long __pad[0]; +} ____cacheline_aligned; + +/* + * FIXME: On x86 and some other CPUs the split into user and softirq parts is not needed because + * addl $1,memory is atomic against interrupts (but atomic_inc would be overkill because 
of the lock + * cycles). Wants new nonlocked_atomic_inc() primitives -AK + */ #define SNMP_INC_STATS(mib, field) ((mib)[2*smp_processor_id()+!in_softirq()].field++) #define SNMP_INC_STATS_BH(mib, field) ((mib)[2*smp_processor_id()].field++) #define SNMP_INC_STATS_USER(mib, field) ((mib)[2*smp_processor_id()+1].field++) diff --git a/include/net/sock.h b/include/net/sock.h index 38b5549d6..e3527b2f3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -470,7 +470,6 @@ typedef struct { #define sock_lock_init(__sk) \ do { spin_lock_init(&((__sk)->lock.slock)); \ - (__sk)->dst_lock = RW_LOCK_UNLOCKED; \ (__sk)->lock.users = 0; \ init_waitqueue_head(&((__sk)->lock.wq)); \ } while(0); @@ -749,6 +748,7 @@ static void __inline__ sock_prot_dec_use(struct proto *prot) #define SOCK_SNDBUF_LOCK 1 #define SOCK_RCVBUF_LOCK 2 #define SOCK_BINDADDR_LOCK 4 +#define SOCK_BINDPORT_LOCK 8 /* Used by processes to "lock" a socket state, so that @@ -818,7 +818,6 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk, int priority); extern void sock_wfree(struct sk_buff *skb); extern void sock_rfree(struct sk_buff *skb); -extern unsigned long sock_wspace(struct sock *sk); extern int sock_setsockopt(struct socket *sock, int level, int op, char *optval, @@ -901,7 +900,7 @@ extern void sklist_destroy_socket(struct sock **list, struct sock *sk); * be accepted or 1 if the packet should be tossed. */ -extern __inline__ int sk_filter(struct sk_buff *skb, struct sk_filter *filter) +static inline int sk_filter(struct sk_buff *skb, struct sk_filter *filter) { int pkt_len; @@ -922,7 +921,7 @@ extern __inline__ int sk_filter(struct sk_buff *skb, struct sk_filter *filter) * Remove a filter from a socket and release its resources. 
*/ -extern __inline__ void sk_filter_release(struct sock *sk, struct sk_filter *fp) +static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp) { unsigned int size = sk_filter_len(fp); @@ -932,7 +931,7 @@ extern __inline__ void sk_filter_release(struct sock *sk, struct sk_filter *fp) kfree(fp); } -extern __inline__ void sk_filter_charge(struct sock *sk, struct sk_filter *fp) +static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) { atomic_inc(&fp->refcnt); atomic_add(sk_filter_len(fp), &sk->omem_alloc); @@ -971,7 +970,7 @@ extern __inline__ void sk_filter_charge(struct sock *sk, struct sk_filter *fp) modifications. */ -extern __inline__ void sock_hold(struct sock *sk) +static inline void sock_hold(struct sock *sk) { atomic_inc(&sk->refcnt); } @@ -979,13 +978,13 @@ extern __inline__ void sock_hold(struct sock *sk) /* Ungrab socket in the context, which assumes that socket refcnt cannot hit zero, f.e. it is true in context of any socketcall. */ -extern __inline__ void __sock_put(struct sock *sk) +static inline void __sock_put(struct sock *sk) { atomic_dec(&sk->refcnt); } /* Ungrab socket and destroy it, if it was the last reference. */ -extern __inline__ void sock_put(struct sock *sk) +static inline void sock_put(struct sock *sk) { if (atomic_dec_and_test(&sk->refcnt)) sk_free(sk); @@ -998,7 +997,7 @@ extern __inline__ void sock_put(struct sock *sk) * probably wants some additional cleanups or even continuing * to work with this socket (TCP). 
*/ -extern __inline__ void sock_orphan(struct sock *sk) +static inline void sock_orphan(struct sock *sk) { write_lock_bh(&sk->callback_lock); sk->dead = 1; @@ -1007,7 +1006,7 @@ extern __inline__ void sock_orphan(struct sock *sk) write_unlock_bh(&sk->callback_lock); } -extern __inline__ void sock_graft(struct sock *sk, struct socket *parent) +static inline void sock_graft(struct sock *sk, struct socket *parent) { write_lock_bh(&sk->callback_lock); sk->sleep = &parent->wait; @@ -1036,13 +1035,13 @@ static inline unsigned long sock_i_ino(struct sock *sk) return ino; } -extern __inline__ struct dst_entry * +static inline struct dst_entry * __sk_dst_get(struct sock *sk) { return sk->dst_cache; } -extern __inline__ struct dst_entry * +static inline struct dst_entry * sk_dst_get(struct sock *sk) { struct dst_entry *dst; @@ -1055,7 +1054,7 @@ sk_dst_get(struct sock *sk) return dst; } -extern __inline__ void +static inline void __sk_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old_dst; @@ -1065,7 +1064,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst) dst_release(old_dst); } -extern __inline__ void +static inline void sk_dst_set(struct sock *sk, struct dst_entry *dst) { write_lock(&sk->dst_lock); @@ -1073,7 +1072,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst) write_unlock(&sk->dst_lock); } -extern __inline__ void +static inline void __sk_dst_reset(struct sock *sk) { struct dst_entry *old_dst; @@ -1083,7 +1082,7 @@ __sk_dst_reset(struct sock *sk) dst_release(old_dst); } -extern __inline__ void +static inline void sk_dst_reset(struct sock *sk) { write_lock(&sk->dst_lock); @@ -1091,7 +1090,7 @@ sk_dst_reset(struct sock *sk) write_unlock(&sk->dst_lock); } -extern __inline__ struct dst_entry * +static inline struct dst_entry * __sk_dst_check(struct sock *sk, u32 cookie) { struct dst_entry *dst = sk->dst_cache; @@ -1104,7 +1103,7 @@ __sk_dst_check(struct sock *sk, u32 cookie) return dst; } -extern __inline__ struct dst_entry * +static 
inline struct dst_entry * sk_dst_check(struct sock *sk, u32 cookie) { struct dst_entry *dst = sk_dst_get(sk); @@ -1127,7 +1126,7 @@ sk_dst_check(struct sock *sk, u32 cookie) * packet ever received. */ -extern __inline__ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) +static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) { sock_hold(sk); skb->sk = sk; @@ -1135,14 +1134,14 @@ extern __inline__ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) atomic_add(skb->truesize, &sk->wmem_alloc); } -extern __inline__ void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) +static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) { skb->sk = sk; skb->destructor = sock_rfree; atomic_add(skb->truesize, &sk->rmem_alloc); } -extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces number of warnings when compiling with -W --ANK @@ -1175,7 +1174,7 @@ extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return 0; } -extern __inline__ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) +static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) { /* Cast skb->rcvbuf to unsigned... 
It's pointless, but reduces number of warnings when compiling with -W --ANK @@ -1193,13 +1192,13 @@ extern __inline__ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) * Recover an error report and clear atomically */ -extern __inline__ int sock_error(struct sock *sk) +static inline int sock_error(struct sock *sk) { int err=xchg(&sk->err,0); return -err; } -extern __inline__ unsigned long sock_wspace(struct sock *sk) +static inline unsigned long sock_wspace(struct sock *sk) { int amt = 0; @@ -1211,7 +1210,7 @@ extern __inline__ unsigned long sock_wspace(struct sock *sk) return amt; } -extern __inline__ void sk_wake_async(struct sock *sk, int how, int band) +static inline void sk_wake_async(struct sock *sk, int how, int band) { if (sk->socket && sk->socket->fasync_list) sock_wake_async(sk->socket, how, band); @@ -1226,27 +1225,27 @@ extern __inline__ void sk_wake_async(struct sock *sk, int how, int band) * Default write policy as shown to user space via poll/select/SIGIO * Kernel internally doesn't use the MIN_WRITE_SPACE threshold. */ -extern __inline__ int sock_writeable(struct sock *sk) +static inline int sock_writeable(struct sock *sk) { return sock_wspace(sk) >= SOCK_MIN_WRITE_SPACE; } -extern __inline__ int gfp_any(void) +static inline int gfp_any(void) { return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; } -extern __inline__ long sock_rcvtimeo(struct sock *sk, int noblock) +static inline long sock_rcvtimeo(struct sock *sk, int noblock) { return noblock ? 0 : sk->rcvtimeo; } -extern __inline__ long sock_sndtimeo(struct sock *sk, int noblock) +static inline long sock_sndtimeo(struct sock *sk, int noblock) { return noblock ? 0 : sk->sndtimeo; } -extern __inline__ int sock_rcvlowat(struct sock *sk, int waitall, int len) +static inline int sock_rcvlowat(struct sock *sk, int waitall, int len) { return (waitall ? len : min(sk->rcvlowat, len)) ? 
: 1; } @@ -1254,7 +1253,7 @@ extern __inline__ int sock_rcvlowat(struct sock *sk, int waitall, int len) /* Alas, with timeout socket operations are not restartable. * Compare this to poll(). */ -extern __inline__ int sock_intr_errno(long timeo) +static inline int sock_intr_errno(long timeo) { return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; } diff --git a/include/net/tcp.h b/include/net/tcp.h index d3a63962c..3fdb1e97a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -166,7 +166,7 @@ struct tcp_tw_bucket { extern kmem_cache_t *tcp_timewait_cachep; -extern __inline__ void tcp_tw_put(struct tcp_tw_bucket *tw) +static inline void tcp_tw_put(struct tcp_tw_bucket *tw) { if (atomic_dec_and_test(&tw->refcnt)) { #ifdef INET_REFCNT_DEBUG @@ -495,7 +495,7 @@ extern kmem_cache_t *tcp_openreq_cachep; #define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC) #define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req) -extern __inline__ void tcp_openreq_free(struct open_request *req) +static inline void tcp_openreq_free(struct open_request *req) { req->class->destructor(req); tcp_openreq_fastfree(req); @@ -656,20 +656,6 @@ static __inline__ void tcp_delack_init(struct tcp_opt *tp) memset(&tp->ack, 0, sizeof(tp->ack)); } -enum tcp_ca_state -{ - TCP_CA_Open = 0, -#define TCPF_CA_Open (1<<TCP_CA_Open) - TCP_CA_Disorder = 1, -#define TCPF_CA_Disorder (1<<TCP_CA_Disorder) - TCP_CA_CWR = 2, -#define TCPF_CA_CWR (1<<TCP_CA_CWR) - TCP_CA_Recovery = 3, -#define TCPF_CA_Recovery (1<<TCP_CA_Recovery) - TCP_CA_Loss = 4 -#define TCPF_CA_Loss (1<<TCP_CA_Loss) -}; - enum tcp_tw_status { @@ -893,7 +879,7 @@ static __inline__ unsigned int tcp_current_mss(struct sock *sk) * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). 
*/ -extern __inline__ void tcp_initialize_rcv_mss(struct sock *sk) +static inline void tcp_initialize_rcv_mss(struct sock *sk) { struct tcp_opt *tp = &sk->tp_pinfo.af_tcp; @@ -1034,7 +1020,7 @@ static __inline__ int tcp_packets_in_flight(struct tcp_opt *tp) * one half the current congestion window, but no * less than two segments */ -extern __inline__ __u32 tcp_recalc_ssthresh(struct tcp_opt *tp) +static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp) { return max(tp->snd_cwnd>>1, 2); } @@ -1043,7 +1029,7 @@ extern __inline__ __u32 tcp_recalc_ssthresh(struct tcp_opt *tp) * The exception is rate halving phase, when cwnd is decreasing towards * ssthresh. */ -extern __inline__ __u32 tcp_current_ssthresh(struct tcp_opt *tp) +static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp) { if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery)) return tp->snd_ssthresh; @@ -1072,7 +1058,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp) } /* Set slow start threshould and cwnd not falling to slow start */ -extern __inline__ void __tcp_enter_cwr(struct tcp_opt *tp) +static inline void __tcp_enter_cwr(struct tcp_opt *tp) { tp->undo_marker = 0; tp->snd_ssthresh = tcp_recalc_ssthresh(tp); @@ -1083,7 +1069,7 @@ extern __inline__ void __tcp_enter_cwr(struct tcp_opt *tp) TCP_ECN_queue_cwr(tp); } -extern __inline__ void tcp_enter_cwr(struct tcp_opt *tp) +static inline void tcp_enter_cwr(struct tcp_opt *tp) { tp->prior_ssthresh = 0; if (tp->ca_state < TCP_CA_CWR) { @@ -1307,6 +1293,8 @@ static __inline__ void tcp_set_state(struct sock *sk, int state) case TCP_CLOSE: sk->prot->unhash(sk); + if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK)) + tcp_put_port(sk); /* fall through */ default: if (oldstate==TCP_ESTABLISHED) @@ -1378,7 +1366,7 @@ static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt * * MAX_SYN_SIZE to match the new maximum number of options that you * can generate. 
*/ -extern __inline__ void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack, +static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack, int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent) { /* We always get an MSS option. @@ -1418,7 +1406,7 @@ extern __inline__ void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sa * be a multiple of mss if possible. We assume here that mss >= 1. * This MUST be enforced by all callers. */ -extern __inline__ void tcp_select_initial_window(int space, __u32 mss, +static inline void tcp_select_initial_window(int space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, @@ -1477,32 +1465,32 @@ static inline int tcp_win_from_space(int space) } /* Note: caller must be prepared to deal with negative returns */ -extern __inline__ int tcp_space(struct sock *sk) +static inline int tcp_space(struct sock *sk) { return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc)); } -extern __inline__ int tcp_full_space( struct sock *sk) +static inline int tcp_full_space( struct sock *sk) { return tcp_win_from_space(sk->rcvbuf); } -extern __inline__ void tcp_acceptq_removed(struct sock *sk) +static inline void tcp_acceptq_removed(struct sock *sk) { sk->ack_backlog--; } -extern __inline__ void tcp_acceptq_added(struct sock *sk) +static inline void tcp_acceptq_added(struct sock *sk) { sk->ack_backlog++; } -extern __inline__ int tcp_acceptq_is_full(struct sock *sk) +static inline int tcp_acceptq_is_full(struct sock *sk) { return sk->ack_backlog > sk->max_ack_backlog; } -extern __inline__ void tcp_acceptq_queue(struct sock *sk, struct open_request *req, +static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req, struct sock *child) { struct tcp_opt *tp = &sk->tp_pinfo.af_tcp; @@ -1528,7 +1516,7 @@ struct tcp_listen_opt struct open_request *syn_table[TCP_SYNQ_HSIZE]; }; -extern __inline__ void +static inline void tcp_synq_removed(struct sock *sk, struct open_request 
*req) { struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt; @@ -1539,7 +1527,7 @@ tcp_synq_removed(struct sock *sk, struct open_request *req) lopt->qlen_young--; } -extern __inline__ void tcp_synq_added(struct sock *sk) +static inline void tcp_synq_added(struct sock *sk) { struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt; @@ -1548,22 +1536,22 @@ extern __inline__ void tcp_synq_added(struct sock *sk) lopt->qlen_young++; } -extern __inline__ int tcp_synq_len(struct sock *sk) +static inline int tcp_synq_len(struct sock *sk) { return sk->tp_pinfo.af_tcp.listen_opt->qlen; } -extern __inline__ int tcp_synq_young(struct sock *sk) +static inline int tcp_synq_young(struct sock *sk) { return sk->tp_pinfo.af_tcp.listen_opt->qlen_young; } -extern __inline__ int tcp_synq_is_full(struct sock *sk) +static inline int tcp_synq_is_full(struct sock *sk) { return tcp_synq_len(sk)>>sk->tp_pinfo.af_tcp.listen_opt->max_qlen_log; } -extern __inline__ void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req, +static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req, struct open_request **prev) { write_lock(&tp->syn_wait_lock); @@ -1571,7 +1559,7 @@ extern __inline__ void tcp_synq_unlink(struct tcp_opt *tp, struct open_request * write_unlock(&tp->syn_wait_lock); } -extern __inline__ void tcp_synq_drop(struct sock *sk, struct open_request *req, +static inline void tcp_synq_drop(struct sock *sk, struct open_request *req, struct open_request **prev) { tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev); @@ -1679,7 +1667,7 @@ extern void tcp_listen_wlock(void); * use plain read_(un)lock(&tcp_lhash_lock). 
*/ -extern __inline__ void tcp_listen_lock(void) +static inline void tcp_listen_lock(void) { /* read_lock synchronizes to candidates to writers */ read_lock(&tcp_lhash_lock); @@ -1687,7 +1675,7 @@ extern __inline__ void tcp_listen_lock(void) read_unlock(&tcp_lhash_lock); } -extern __inline__ void tcp_listen_unlock(void) +static inline void tcp_listen_unlock(void) { if (atomic_dec_and_test(&tcp_lhash_users)) wake_up(&tcp_lhash_wait); diff --git a/include/net/x25.h b/include/net/x25.h index 66575c464..fb8346f81 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -188,7 +188,7 @@ extern struct x25_neigh *x25_get_neigh(struct net_device *); extern void x25_link_free(void); /* x25_out.c */ -extern void x25_output(struct sock *, struct sk_buff *); +extern int x25_output(struct sock *, struct sk_buff *); extern void x25_kick(struct sock *); extern void x25_enquiry_response(struct sock *); |