Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 80
1 file changed, 49 insertions(+), 31 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2089a9710..d499875de 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,6 +15,8 @@
#define _LINUX_SKBUFF_H
#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/time.h>
#include <asm/atomic.h>
@@ -30,15 +32,19 @@
#define CHECKSUM_UNNECESSARY 2
struct sk_buff_head {
+ /* These two members must be first. */
struct sk_buff * next;
struct sk_buff * prev;
- __u32 qlen; /* Must be same length as a pointer
- for using debugging */
+
+ __u32 qlen;
+ spinlock_t lock;
};
struct sk_buff {
+ /* These two members must be first. */
struct sk_buff * next; /* Next buffer in list */
struct sk_buff * prev; /* Previous buffer in list */
+
struct sk_buff_head * list; /* List we are on */
struct sock *sk; /* Socket we are owned by */
struct timeval stamp; /* Time we arrived */
@@ -245,6 +251,7 @@ extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
{
+ spin_lock_init(&list->lock);
list->prev = (struct sk_buff *)list;
list->next = (struct sk_buff *)list;
list->qlen = 0;
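The heart of the patch: every sk_buff_head now carries its own spinlock, set up in skb_queue_head_init(), replacing the single global skb_queue_lock that used to serialize every queue operation in the system. The "must be first" comments exist because the head is cast to a struct sk_buff * to serve as the list sentinel, as the init function above shows. A minimal sketch of a queue owner picking up the new per-queue lock; the my_priv structure and function are hypothetical, not part of the patch:

#include <linux/skbuff.h>

struct my_priv {
	struct sk_buff_head rx_queue;	/* embeds its own spinlock now */
};

static void my_priv_init(struct my_priv *p)
{
	/* Points prev/next back at the head (empty ring) and,
	 * with this patch, initializes the embedded lock too. */
	skb_queue_head_init(&p->rx_queue);
}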
@@ -271,15 +278,13 @@ extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buf
prev->next = newsk;
}
-extern spinlock_t skb_queue_lock;
-
extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
- spin_lock_irqsave(&skb_queue_lock, flags);
+ spin_lock_irqsave(&list->lock, flags);
__skb_queue_head(list, newsk);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_unlock_irqrestore(&list->lock, flags);
}
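With the lock scoped to the queue, the double-underscore helpers become useful to callers that take list->lock themselves, for example to amortize one lock round trip over several buffers. A hedged sketch (enqueue_batch is hypothetical):

static void enqueue_batch(struct sk_buff_head *q,
			  struct sk_buff **skbs, int n)
{
	unsigned long flags;
	int i;

	/* One irqsave/irqrestore pair for the whole batch. */
	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < n; i++)
		__skb_queue_head(q, skbs[i]);
	spin_unlock_irqrestore(&q->lock, flags);
}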
/*
@@ -304,9 +309,9 @@ extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff
{
unsigned long flags;
- spin_lock_irqsave(&skb_queue_lock, flags);
+ spin_lock_irqsave(&list->lock, flags);
__skb_queue_tail(list, newsk);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_unlock_irqrestore(&list->lock, flags);
}
/*
@@ -338,9 +343,9 @@ extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
unsigned long flags;
struct sk_buff *result;
- spin_lock_irqsave(&skb_queue_lock, flags);
+ spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue(list);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_unlock_irqrestore(&list->lock, flags);
return result;
}
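skb_dequeue() returns NULL once the queue is empty, so the usual consumer is a simple drain loop. An illustrative sketch (drain_queue is hypothetical):

static void drain_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	/* Each iteration takes and drops q->lock internally. */
	while ((skb = skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}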
@@ -367,9 +372,9 @@ extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
- spin_lock_irqsave(&skb_queue_lock, flags);
+ spin_lock_irqsave(&old->list->lock, flags);
__skb_insert(newsk, old->prev, old, old->list);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
@@ -385,9 +390,9 @@ extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
- spin_lock_irqsave(&skb_queue_lock, flags);
+ spin_lock_irqsave(&old->list->lock, flags);
__skb_append(old, newsk);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_unlock_irqrestore(&old->list->lock, flags);
}
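Note the precondition these two inherit from the new scheme: they lock old->list->lock, so old must already be on a queue; unlike skb_unlink() below, a NULL old->list is not tolerated. A sketch of skb_append(), which places newsk after old (tag_first is hypothetical):

static void tag_first(struct sk_buff_head *q, struct sk_buff *marker)
{
	struct sk_buff *first = skb_peek(q);

	/* skb_peek() is unlocked here; a real caller must ensure the
	 * queue cannot change underneath it (sketch only). */
	if (first)
		skb_append(first, marker);
}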
/*
@@ -417,12 +422,16 @@ extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *li
extern __inline__ void skb_unlink(struct sk_buff *skb)
{
- unsigned long flags;
+ struct sk_buff_head *list = skb->list;
+
+ if(list) {
+ unsigned long flags;
- spin_lock_irqsave(&skb_queue_lock, flags);
- if(skb->list)
- __skb_unlink(skb, skb->list);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_lock_irqsave(&list->lock, flags);
+ if(skb->list == list)
+ __skb_unlink(skb, skb->list);
+ spin_unlock_irqrestore(&list->lock, flags);
+ }
}
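skb->list is sampled without the lock, so another CPU can unlink the buffer between that load and the spin_lock_irqsave(); re-checking skb->list == list under the lock is what makes the operation race-safe. The same check-then-recheck pattern, open-coded for a caller that wants to know whether it won the race (unlink_if_queued is hypothetical):

static int unlink_if_queued(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;
	unsigned long flags;
	int unlinked = 0;

	if (list) {
		spin_lock_irqsave(&list->lock, flags);
		if (skb->list == list) {	/* nobody raced us */
			__skb_unlink(skb, list);
			unlinked = 1;
		}
		spin_unlock_irqrestore(&list->lock, flags);
	}
	return unlinked;
}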
/* XXX: more streamlined implementation */
@@ -439,9 +448,9 @@ extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
unsigned long flags;
struct sk_buff *result;
- spin_lock_irqsave(&skb_queue_lock, flags);
+ spin_lock_irqsave(&list->lock, flags);
result = __skb_dequeue_tail(list);
- spin_unlock_irqrestore(&skb_queue_lock, flags);
+ spin_unlock_irqrestore(&list->lock, flags);
return result;
}
@@ -449,29 +458,38 @@ extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
* Add data to an sk_buff
*/
+extern __inline__ unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ return tmp;
+}
+
extern __inline__ unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp=skb->tail;
skb->tail+=len;
skb->len+=len;
- if(skb->tail>skb->end)
- {
- __label__ here;
- skb_over_panic(skb, len, &&here);
-here: ;
+ if(skb->tail>skb->end) {
+ skb_over_panic(skb, len, current_text_addr());
}
return tmp;
}
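__skb_put() is the new unchecked fast path for callers that have already verified tailroom; skb_put() keeps the overrun panic and now reports the caller via current_text_addr() instead of the old gcc local-label trick. Canonical use is to copy into the returned pointer; a sketch (append_payload, buf and len are hypothetical; memcpy comes from <linux/string.h>):

static void append_payload(struct sk_buff *skb,
			   const void *buf, unsigned int len)
{
	/* skb_put() extends skb->len and returns the old tail,
	 * panicking if the data would run past skb->end. */
	memcpy(skb_put(skb, len), buf, len);
}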
+extern __inline__ unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ return skb->data;
+}
+
extern __inline__ unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data-=len;
skb->len+=len;
- if(skb->data<skb->head)
- {
- __label__ here;
- skb_under_panic(skb, len, &&here);
-here: ;
+ if(skb->data<skb->head) {
+ skb_under_panic(skb, len, current_text_addr());
}
return skb->data;
}
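skb_push() is the mirror image at the head of the buffer: it moves skb->data back into reserved headroom and returns the new start, panicking if it would cross skb->head. A sketch of prepending a made-up 4-byte header (my_hdr and push_header are hypothetical):

struct my_hdr {
	__u16 type;
	__u16 len;
};

static void push_header(struct sk_buff *skb, __u16 type)
{
	struct my_hdr *h = (struct my_hdr *)skb_push(skb, sizeof(*h));

	/* Byte order is ignored in this sketch. */
	h->type = type;
	h->len = skb->len;
}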