author | Ralf Baechle <ralf@linux-mips.org> | 1998-04-05 11:23:36 +0000 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 1998-04-05 11:23:36 +0000 |
commit | 4318fbda2a7ee51caafdc4eb1f8028a3f0605142 (patch) | |
tree | cddb50a81d7d1a628cc400519162080c6d87868e /include/linux/skbuff.h | |
parent | 36ea5120664550fae6d31f1c6f695e4f8975cb06 (diff) | |
o Merge with Linux 2.1.91.
o First round of bugfixes for the SC/MC CPUs.
o FPU context switch fixes.
o Lazy context switches.
o Faster syscalls.
o Removed dead code.
o Shitloads of other things I forgot ...
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r-- | include/linux/skbuff.h | 74 |
1 file changed, 46 insertions, 28 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e19a95fec..d94b40bcc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -19,9 +19,11 @@
 #include <asm/atomic.h>
 #include <asm/types.h>
+#include <asm/spinlock.h>
 
 #define HAVE_ALLOC_SKB		/* For the drivers to know */
 #define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
+#define SLAB_SKB		/* Slabified skbuffs */
 
 #define CHECKSUM_NONE 0
 #define CHECKSUM_HW 1
@@ -88,27 +90,27 @@ struct sk_buff
 	unsigned int	len;		/* Length of actual data */
 	unsigned int	csum;		/* Checksum */
-	volatile char	used;
-	unsigned char	tries,		/* Times tried */
-			inclone,	/* Inline clone */
+	volatile char	used;		/* Data moved to user and not MSG_PEEK */
+	unsigned char	is_clone,	/* We are a clone */
+			cloned,		/* head may be cloned (check refcnt to be sure). */
 			pkt_type,	/* Packet class */
 			pkt_bridged,	/* Tracker for bridging */
 			ip_summed;	/* Driver fed us an IP checksum */
-	__u32		priority;
+	__u32		priority;	/* Packet queueing priority */
 	atomic_t	users;		/* User count - see datagram.c,tcp.c */
 	unsigned short	protocol;	/* Packet protocol from driver. */
 	unsigned short	security;	/* Security level of packet */
 	unsigned int	truesize;	/* Buffer size */
+#ifndef SLAB_SKB
 	atomic_t	count;		/* reference count */
 	struct sk_buff	*data_skb;	/* Link to the actual data skb */
+#endif
 	unsigned char	*head;		/* Head of buffer */
 	unsigned char	*data;		/* Data head pointer */
 	unsigned char	*tail;		/* Tail pointer */
 	unsigned char	*end;		/* End pointer */
 	void		(*destructor)(struct sk_buff *);	/* Destruct function */
 
-#define SKB_CLONE_ORIG		1
-#define SKB_CLONE_INLINE	2
-
 #if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
 	__u32		shapelatency;	/* Latency on frame */
@@ -163,6 +165,12 @@ extern int skb_tailroom(struct sk_buff *skb);
 extern void skb_reserve(struct sk_buff *skb, unsigned int len);
 extern void skb_trim(struct sk_buff *skb, unsigned int len);
 
+/* Internal */
+extern __inline__ atomic_t *skb_datarefp(struct sk_buff *skb)
+{
+	return (atomic_t *)(skb->end);
+}
+
 extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
 {
 	return (list->next == (struct sk_buff *) list);
 }
@@ -174,9 +182,16 @@ extern __inline__ void kfree_skb(struct sk_buff *skb)
 		__kfree_skb(skb);
 }
 
+/* Use this if you didn't touch the skb state [for fast switching] */
+extern __inline__ void kfree_skb_fast(struct sk_buff *skb)
+{
+	if (atomic_dec_and_test(&skb->users))
+		kfree_skbmem(skb);
+}
+
 extern __inline__ int skb_cloned(struct sk_buff *skb)
 {
-	return (atomic_read(&skb->data_skb->count) != 1);
+	return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
 }
 
 extern __inline__ int skb_shared(struct sk_buff *skb)
@@ -261,14 +276,15 @@ extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buf
 	prev->next = newsk;
 }
 
+extern spinlock_t skb_queue_lock;
+
 extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
 {
 	unsigned long flags;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&skb_queue_lock, flags);
 	__skb_queue_head(list, newsk);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&skb_queue_lock, flags);
 }
 
 /*
@@ -293,10 +309,9 @@ extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff
 {
 	unsigned long flags;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&skb_queue_lock, flags);
 	__skb_queue_tail(list, newsk);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&skb_queue_lock, flags);
 }
 
 /*
@@ -328,10 +343,9 @@ extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
 	long flags;
 	struct sk_buff *result;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&skb_queue_lock, flags);
 	result = __skb_dequeue(list);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&skb_queue_lock, flags);
 	return result;
 }
@@ -358,10 +372,9 @@ extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 {
 	unsigned long flags;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&skb_queue_lock, flags);
 	__skb_insert(newsk, old->prev, old, old->list);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&skb_queue_lock, flags);
 }
 
 /*
@@ -372,10 +385,9 @@ extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
 {
 	unsigned long flags;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&skb_queue_lock, flags);
 	__skb_insert(newsk, old, old->next, old->list);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&skb_queue_lock, flags);
 }
 
 /*
@@ -407,11 +419,10 @@ extern __inline__ void skb_unlink(struct sk_buff *skb)
 {
 	unsigned long flags;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&skb_queue_lock, flags);
 	if(skb->list)
 		__skb_unlink(skb, skb->list);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&skb_queue_lock, flags);
 }
 
 /* XXX: more streamlined implementation */
@@ -428,10 +439,9 @@ extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
 	long flags;
 	struct sk_buff *result;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&skb_queue_lock, flags);
 	result = __skb_dequeue_tail(list);
-	restore_flags(flags);
+	spin_unlock_irqrestore(&skb_queue_lock, flags);
 	return result;
 }
@@ -451,7 +461,12 @@ extern __inline__ unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
 	if(skb->tail>skb->end) {
 		__label__ here;
+#if 1
+		printk(KERN_DEBUG "skbput: over: %p:tail=%p:end=%p:len=%u\n",
+			&&here, skb->tail, skb->end, len);
+#else
 		panic(skb_put_errstr,&&here,len);
+#endif
here:		;
 	}
 	return tmp;
 }
@@ -543,5 +558,8 @@ extern int skb_copy_datagram(struct sk_buff *from, int offset, char *to,int si
 extern int skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
 extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
 
+extern void skb_init(void);
+extern void skb_add_mtu(int mtu);
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
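The recurring change in the queue helpers above swaps the old save_flags()/cli()/restore_flags() interrupt-disable pattern for a single global spinlock, skb_queue_lock, held around every list operation. As a rough illustration only, here is a minimal userspace model of that pattern: a pthread mutex stands in for the spinlock and a toy struct pkt stands in for sk_buff, so every name in the sketch is invented for the example rather than taken from the kernel.

```c
/*
 * Userspace model (not kernel code) of the locking pattern above:
 * every queue operation is bracketed by one global lock instead of
 * disabling interrupts.  All identifiers here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

struct pkt {				/* stand-in for sk_buff */
	struct pkt *next, *prev;
	int len;
};

struct pkt_head {			/* stand-in for sk_buff_head */
	struct pkt *next, *prev;
	unsigned int qlen;
};

/* One global lock for all queues, like skb_queue_lock. */
static pthread_mutex_t pkt_queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void pkt_head_init(struct pkt_head *list)
{
	list->next = list->prev = (struct pkt *)list;
	list->qlen = 0;
}

/* Unlocked helper, like __skb_queue_tail(). */
static void __pkt_queue_tail(struct pkt_head *list, struct pkt *newp)
{
	struct pkt *prev = list->prev;

	newp->next = (struct pkt *)list;
	newp->prev = prev;
	prev->next = newp;
	list->prev = newp;
	list->qlen++;
}

/* Locked wrapper, like skb_queue_tail(). */
static void pkt_queue_tail(struct pkt_head *list, struct pkt *newp)
{
	pthread_mutex_lock(&pkt_queue_lock);
	__pkt_queue_tail(list, newp);
	pthread_mutex_unlock(&pkt_queue_lock);
}

/* Locked dequeue, like skb_dequeue(); returns NULL when empty. */
static struct pkt *pkt_dequeue(struct pkt_head *list)
{
	struct pkt *result = NULL;

	pthread_mutex_lock(&pkt_queue_lock);
	if (list->next != (struct pkt *)list) {
		result = list->next;
		list->next = result->next;
		result->next->prev = (struct pkt *)list;
		result->next = result->prev = NULL;
		list->qlen--;
	}
	pthread_mutex_unlock(&pkt_queue_lock);
	return result;
}

int main(void)
{
	struct pkt_head q;
	struct pkt a = { .len = 64 }, b = { .len = 128 };
	struct pkt *p;

	pkt_head_init(&q);
	pkt_queue_tail(&q, &a);
	pkt_queue_tail(&q, &b);
	while ((p = pkt_dequeue(&q)) != NULL)
		printf("dequeued pkt of len %d\n", p->len);
	return 0;
}
```

One global lock keeps the patch small, at the cost of serializing unrelated queues against each other; that is the trade-off the hunks above accept.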