Commit 960fb66

Eric Dumazet authored and davem330 committed
netem: add limitation to reordered packets
Fix two netem bugs:

1) When a frame was dropped by tfifo_enqueue(), the drop counter was
   incremented twice.

2) When reordering is triggered, we enqueue a packet without checking
   the queue limit. This can OOM pretty fast when this is repeated
   enough, since skbs are orphaned, so no socket limit can help in this
   situation.

Signed-off-by: Eric Dumazet <[email protected]>
Cc: Mark Gordon <[email protected]>
Cc: Andreas Terzis <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Hagen Paul Pfeifer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
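The fix moves the queue-limit check and the backlog accounting to the top of netem_enqueue(), before the choice between the time-sorted insert and the reorder-to-head path, so that both paths respect sch->limit and a dropped frame is no longer counted twice. The short userspace sketch below only illustrates that bookkeeping; it is not kernel code, and every name in it (struct fake_sch, enqueue_model) is made up for illustration rather than taken from sch_netem.c.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct Qdisc's queue/limit/stats fields. */
struct fake_sch {
	unsigned int qlen;	/* current queue length */
	unsigned int limit;	/* configured packet limit */
	unsigned int backlog;	/* queued bytes */
	unsigned int drops;	/* drop counter */
};

/* Models the patched flow: one limit check and one backlog update up front,
 * shared by the normal (time-sorted) path and the reorder-to-head path.
 */
static int enqueue_model(struct fake_sch *sch, unsigned int pkt_len, bool reorder)
{
	if (sch->qlen >= sch->limit) {
		sch->drops++;		/* counted exactly once */
		return -1;		/* stands in for NET_XMIT_DROP */
	}
	sch->backlog += pkt_len;	/* accounted once for either path */
	sch->qlen++;

	if (reorder)
		printf("queued at head (reordered)\n");
	else
		printf("queued in time order\n");
	return 0;			/* stands in for NET_XMIT_SUCCESS */
}

int main(void)
{
	struct fake_sch sch = { .qlen = 0, .limit = 2, .backlog = 0, .drops = 0 };

	enqueue_model(&sch, 1500, false);
	enqueue_model(&sch, 1500, true);	/* reordered packet is still limited */
	enqueue_model(&sch, 1500, true);	/* over limit: dropped, drops == 1 */
	printf("qlen=%u backlog=%u drops=%u\n", sch.qlen, sch.backlog, sch.drops);
	return 0;
}

In this toy model the third call is refused, leaving qlen=2, backlog=3000 and drops=1, which mirrors the single-count, limit-respecting behaviour the patch gives the reordering path.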
1 parent b94e52f commit 960fb66

1 file changed, 15 insertions(+), 27 deletions(-)


net/sched/sch_netem.c

@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
-
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
+	struct sk_buff *skb = skb_peek_tail(list);
 
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
 
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
 	}
 
-	return qdisc_reshape_fail(nskb, sch);
+	__skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;
