author    Ralf Baechle <ralf@linux-mips.org>  1999-06-17 13:25:08 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1999-06-17 13:25:08 +0000
commit    59223edaa18759982db0a8aced0e77457d10c68e (patch)
tree      89354903b01fa0a447bffeefe00df3044495db2e /net/ipv4/ip_fragment.c
parent    db7d4daea91e105e3859cf461d7e53b9b77454b2 (diff)
Merge with Linux 2.3.6. Sorry, this isn't tested on silicon; I don't
have a MIPS box at hand.
Diffstat (limited to 'net/ipv4/ip_fragment.c')
-rw-r--r--  net/ipv4/ip_fragment.c  27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index f066e6073..29747fee6 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -5,7 +5,7 @@
*
* The IP fragmentation functionality.
*
- * Version: $Id: ip_fragment.c,v 1.40 1999/03/20 23:58:34 davem Exp $
+ * Version: $Id: ip_fragment.c,v 1.41 1999/05/27 00:38:07 davem Exp $
*
* Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox <Alan.Cox@linux.org>
@@ -71,7 +71,8 @@ struct ipq {
#define IPQ_HASHSZ 64
-struct ipq *ipq_hash[IPQ_HASHSZ];
+static struct ipq *ipq_hash[IPQ_HASHSZ];
+static spinlock_t ipfrag_lock = SPIN_LOCK_UNLOCKED;
#define ipqhashfn(id, saddr, daddr, prot) \
((((id) >> 1) ^ (saddr) ^ (daddr) ^ (prot)) & (IPQ_HASHSZ - 1))
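As an aside on the hunk above: ipqhashfn folds the IP id, both addresses,
and the protocol into one of the 64 buckets that ipfrag_lock now protects.
A minimal standalone sketch of how it distributes a queue (the sample id,
addresses, and protocol values here are invented for illustration):

#include <stdio.h>

#define IPQ_HASHSZ 64
#define ipqhashfn(id, saddr, daddr, prot) \
	((((id) >> 1) ^ (saddr) ^ (daddr) ^ (prot)) & (IPQ_HASHSZ - 1))

int main(void)
{
	/* Invented sample: IP id 0x1234, 192.168.0.1 -> 192.168.0.2, UDP (17). */
	unsigned int hash = ipqhashfn(0x1234u, 0xc0a80001u, 0xc0a80002u, 17u);

	printf("fragment queue lands in bucket %u of %d\n", hash, IPQ_HASHSZ);
	return 0;
}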
@@ -141,7 +142,9 @@ static inline struct ipq *ip_find(struct iphdr *iph, struct dst_entry *dst)
unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
struct ipq *qp;
- /* Always, we are in a BH context, so no locking. -DaveM */
+ /* We are always in BH context, and protected by the
+ * ipfrag lock.
+ */
for(qp = ipq_hash[hash]; qp; qp = qp->next) {
if(qp->iph->id == id &&
qp->iph->saddr == saddr &&
@@ -158,8 +161,9 @@ static inline struct ipq *ip_find(struct iphdr *iph, struct dst_entry *dst)
* because we completed, reassembled and processed it, or because
* it timed out.
*
- * This is called _only_ from BH contexts, on packet reception
- * processing and from frag queue expiration timers. -DaveM
+ * This is called _only_ from BH contexts with the ipfrag lock held,
+ * on packet reception processing and from frag queue expiration
+ * timers. -DaveM
*/
static void ip_free(struct ipq *qp)
{
@@ -197,6 +201,7 @@ static void ip_expire(unsigned long arg)
{
struct ipq *qp = (struct ipq *) arg;
+ spin_lock(&ipfrag_lock);
if(!qp->fragments)
{
#ifdef IP_EXPIRE_DEBUG
@@ -213,10 +218,13 @@ static void ip_expire(unsigned long arg)
out:
/* Nuke the fragment queue. */
ip_free(qp);
+ spin_unlock(&ipfrag_lock);
}
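The shape of this hunk is the important part: the expiry timer handler now
takes ipfrag_lock itself on entry and must release it on every exit path,
including the early-out for an already-empty queue. A userspace analogue of
that pattern, using POSIX spinlocks purely for illustration (frag_lock, the
have_fragments flag, and the function name are invented, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t frag_lock;

static void expire(int have_fragments)
{
	pthread_spin_lock(&frag_lock);
	if (!have_fragments) {
		fprintf(stderr, "warning: empty queue expired\n");
		goto out;
	}
	/* ... the real handler would send an ICMP time-exceeded here ... */
out:
	/* Free the queue, then drop the lock on every path out. */
	pthread_spin_unlock(&frag_lock);
}

int main(void)
{
	pthread_spin_init(&frag_lock, PTHREAD_PROCESS_PRIVATE);
	expire(0);
	expire(1);
	pthread_spin_destroy(&frag_lock);
	return 0;
}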
/* Memory limiting on fragments. Evictor trashes the oldest
* fragment queue until we are back under the low threshold.
+ *
+ * We are always called in BH with the ipfrag lock held.
*/
static void ip_evictor(void)
{
@@ -229,9 +237,6 @@ restart:
struct ipq *qp;
if (atomic_read(&ip_frag_mem) <= sysctl_ipfrag_low_thresh)
return;
- /* We are in a BH context, so these queue
- * accesses are safe. -DaveM
- */
qp = ipq_hash[i];
if (qp) {
/* find the oldest queue for this hash bucket */
@@ -283,7 +288,7 @@ static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph)
/* Add this entry to the queue. */
hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
- /* We are in a BH context, no locking necessary. -DaveM */
+ /* In a BH context and ipfrag lock is held. -DaveM */
if((qp->next = ipq_hash[hash]) != NULL)
qp->next->pprev = &qp->next;
ipq_hash[hash] = qp;
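The insertion above uses the next/pprev idiom: pprev holds the address of
whatever pointer currently points at the entry, so unlinking a queue never
requires walking its bucket. A minimal sketch of that pattern in plain C
(the node layout and all names are invented for the example):

#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next;
	struct node **pprev;
	int id;
};

static struct node *bucket;

static void insert(struct node *n)
{
	/* Same shape as the ip_create hunk: fix up the old head's pprev. */
	if ((n->next = bucket) != NULL)
		n->next->pprev = &n->next;
	bucket = n;
	n->pprev = &bucket;
}

static void unlink_node(struct node *n)
{
	/* No bucket walk: *pprev is whichever pointer referenced us. */
	if (n->next)
		n->next->pprev = n->pprev;
	*n->pprev = n->next;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	insert(&a);
	insert(&b);
	unlink_node(&a);
	for (struct node *p = bucket; p; p = p->next)
		printf("id %d\n", p->id);
	return 0;
}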
@@ -421,6 +426,8 @@ struct sk_buff *ip_defrag(struct sk_buff *skb)
ip_statistics.IpReasmReqds++;
+ spin_lock(&ipfrag_lock);
+
/* Start by cleaning up the memory. */
if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
ip_evictor();
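With the lock now taken first, ip_defrag's memory policing follows a
two-watermark policy: once ip_frag_mem crosses sysctl_ipfrag_high_thresh,
ip_evictor frees the oldest queues until usage falls back under
sysctl_ipfrag_low_thresh. A toy model of that policy (all numbers and the
per-queue cost are invented; the real code frees actual fragment queues):

#include <stdio.h>

static long frag_mem = 300;          /* stand-in for ip_frag_mem            */
static const long high_thresh = 256; /* stand-in for the high-thresh sysctl */
static const long low_thresh  = 192; /* stand-in for the low-thresh sysctl  */

static void evictor(void)
{
	/* Each pass frees one oldest queue; model each as 40 units. */
	while (frag_mem > low_thresh)
		frag_mem -= 40;
}

int main(void)
{
	if (frag_mem > high_thresh)
		evictor();
	printf("fragment memory settled at %ld (low watermark %ld)\n",
	       frag_mem, low_thresh);
	return 0;
}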
@@ -565,6 +572,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb)
out_freequeue:
ip_free(qp);
out_skb:
+ spin_unlock(&ipfrag_lock);
return skb;
}
@@ -574,6 +582,7 @@ out_skb:
out_timer:
mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time); /* ~ 30 seconds */
out:
+ spin_unlock(&ipfrag_lock);
return NULL;
/*