kernel: merge a pending fix for HFSC warnings/slowdowns (fixes FS#1136)
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 4e8f1e9f4c
commit 9306bdf31c
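
Background for the merge: the patch added below stops codel/fq_codel from deferring the backlog update. When packets were dropped in ->dequeue() but an skb is about to be returned, sch->q.qlen is temporarily incremented around qdisc_tree_reduce_backlog() so HTB/HFSC does not deactivate the class for a queue that is still delivering a packet. The toy sketch below only illustrates that accounting trick; struct toy_sched and toy_reduce_backlog() are invented for this example and are not the kernel API (the real change is in net/sched/sch_codel.c and net/sched/sch_fq_codel.c, shown in the diff).

/* Toy model of the "temporarily bump qlen" trick used by the merged patch.
 * All types and helpers here are invented for illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct toy_sched {
	unsigned int qlen;	/* packets still queued in this qdisc */
	bool parent_active;	/* whether the parent class keeps us active */
};

/* Stand-in for qdisc_tree_reduce_backlog(): the parent deactivates the
 * class as soon as it is told the child's qlen has reached zero. */
static void toy_reduce_backlog(struct toy_sched *sch, unsigned int dropped)
{
	if (sch->qlen == 0)
		sch->parent_active = false;
	printf("reported %u dropped packets, parent_active=%d\n",
	       dropped, sch->parent_active);
}

/* Dequeue path: some packets were dropped and one skb is still being
 * returned. Bump qlen around the backlog update so the parent is not
 * deactivated for a class that is about to hand back a packet. */
static void toy_dequeue_with_fix(struct toy_sched *sch, unsigned int dropped,
				 bool have_skb)
{
	if (dropped) {
		if (have_skb)
			sch->qlen++;
		toy_reduce_backlog(sch, dropped);
		if (have_skb)
			sch->qlen--;
	}
}

int main(void)
{
	struct toy_sched sch = { .qlen = 0, .parent_active = true };

	/* Last queued packet is returned while 3 others were dropped:
	 * without the temporary increment the parent would deactivate us. */
	toy_dequeue_with_fix(&sch, 3, true);
	return 0;
}
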
@@ -13,7 +13,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -465,7 +465,11 @@ static int fq_codel_init(struct Qdisc *s
+@@ -471,7 +471,11 @@ static int fq_codel_init(struct Qdisc *s

sch->limit = 10*1024;
q->flows_cnt = 1024;
@@ -55,7 +55,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
register_qdisc(&pfifo_head_drop_qdisc_ops);
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -694,7 +694,7 @@ static const struct Qdisc_class_ops fq_c
+@@ -700,7 +700,7 @@ static const struct Qdisc_class_ops fq_c
.walk = fq_codel_walk,
};
@@ -64,7 +64,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
.cl_ops = &fq_codel_class_ops,
.id = "fq_codel",
.priv_size = sizeof(struct fq_codel_sched_data),
-@@ -709,6 +709,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
+@@ -715,6 +715,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};
@@ -13,7 +13,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>

--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -479,7 +479,11 @@ static int fq_codel_init(struct Qdisc *s
+@@ -485,7 +485,11 @@ static int fq_codel_init(struct Qdisc *s

sch->limit = 10*1024;
q->flows_cnt = 1024;
@@ -55,7 +55,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
register_qdisc(&pfifo_head_drop_qdisc_ops);
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -709,7 +709,7 @@ static const struct Qdisc_class_ops fq_c
+@@ -715,7 +715,7 @@ static const struct Qdisc_class_ops fq_c
.walk = fq_codel_walk,
};
@@ -64,7 +64,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
.cl_ops = &fq_codel_class_ops,
.id = "fq_codel",
.priv_size = sizeof(struct fq_codel_sched_data),
-@@ -724,6 +724,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
+@@ -730,6 +730,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};
@@ -0,0 +1,86 @@
From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Date: Mon, 21 Aug 2017 11:14:14 +0300
Subject: [PATCH] net_sched/codel: do not defer queue length update

When codel wants to drop last packet in ->dequeue() it cannot call
qdisc_tree_reduce_backlog() right away - it will notify parent qdisc
about zero qlen and HTB/HFSC will deactivate class. The same class will
be deactivated second time by caller of ->dequeue(). Currently codel and
fq_codel defer update. This triggers warning in HFSC when it's qlen != 0
but there is no active classes.

This patch update parent queue length immediately: just temporary increase
qlen around qdisc_tree_reduce_backlog() to prevent first class deactivation
if we have skb to return.

This might open another problem in HFSC - now operation peek could fail and
deactivate parent class.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581
---

--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -95,11 +95,17 @@ static struct sk_buff *codel_qdisc_deque
&q->stats, qdisc_pkt_len, codel_get_enqueue_time,
drop_func, dequeue_func);

- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
+ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
+ * parent class, dequeue in parent qdisc will do the same if we
+ * return skb. Temporary increment qlen if we have skb.
*/
- if (q->stats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ if (q->stats.drop_count) {
+ if (skb)
+ sch->q.qlen++;
+ qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
+ q->stats.drop_len);
+ if (skb)
+ sch->q.qlen--;
q->stats.drop_count = 0;
q->stats.drop_len = 0;
}
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -316,6 +316,21 @@ begin:
flow->dropped += q->cstats.drop_count - prev_drop_count;
flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

+ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
+ * parent class, dequeue in parent qdisc will do the same if we
+ * return skb. Temporary increment qlen if we have skb.
+ */
+ if (q->cstats.drop_count) {
+ if (skb)
+ sch->q.qlen++;
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ q->cstats.drop_len);
+ if (skb)
+ sch->q.qlen--;
+ q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
+ }
+
if (!skb) {
/* force a pass through old_flows to prevent starvation */
if ((head == &q->new_flows) && !list_empty(&q->old_flows))
@@ -326,15 +341,6 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
- */
- if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
- q->cstats.drop_len);
- q->cstats.drop_count = 0;
- q->cstats.drop_len = 0;
- }
return skb;
}
@@ -0,0 +1,86 @@
From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Date: Mon, 21 Aug 2017 11:14:14 +0300
Subject: [PATCH] net_sched/codel: do not defer queue length update

When codel wants to drop last packet in ->dequeue() it cannot call
qdisc_tree_reduce_backlog() right away - it will notify parent qdisc
about zero qlen and HTB/HFSC will deactivate class. The same class will
be deactivated second time by caller of ->dequeue(). Currently codel and
fq_codel defer update. This triggers warning in HFSC when it's qlen != 0
but there is no active classes.

This patch update parent queue length immediately: just temporary increase
qlen around qdisc_tree_reduce_backlog() to prevent first class deactivation
if we have skb to return.

This might open another problem in HFSC - now operation peek could fail and
deactivate parent class.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581
---

--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -79,11 +79,17 @@ static struct sk_buff *codel_qdisc_deque

skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);

- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
+ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
+ * parent class, dequeue in parent qdisc will do the same if we
+ * return skb. Temporary increment qlen if we have skb.
*/
- if (q->stats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ if (q->stats.drop_count) {
+ if (skb)
+ sch->q.qlen++;
+ qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
+ q->stats.drop_len);
+ if (skb)
+ sch->q.qlen--;
q->stats.drop_count = 0;
q->stats.drop_len = 0;
}
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -311,6 +311,21 @@ begin:
flow->dropped += q->cstats.drop_count - prev_drop_count;
flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

+ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
+ * parent class, dequeue in parent qdisc will do the same if we
+ * return skb. Temporary increment qlen if we have skb.
+ */
+ if (q->cstats.drop_count) {
+ if (skb)
+ sch->q.qlen++;
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ q->cstats.drop_len);
+ if (skb)
+ sch->q.qlen--;
+ q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
+ }
+
if (!skb) {
/* force a pass through old_flows to prevent starvation */
if ((head == &q->new_flows) && !list_empty(&q->old_flows))
@@ -321,15 +336,6 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
- */
- if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
- q->cstats.drop_len);
- q->cstats.drop_count = 0;
- q->cstats.drop_len = 0;
- }
return skb;
}
@@ -1,6 +1,6 @@
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -471,7 +471,7 @@ static int fq_codel_init(struct Qdisc *s
+@@ -477,7 +477,7 @@ static int fq_codel_init(struct Qdisc *s

sch->limit = 10*1024;
q->flows_cnt = 1024;
@@ -13,7 +13,7 @@
device, it has to decide which ones to send first, which ones to
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -688,7 +688,7 @@ static const struct Qdisc_class_ops fq_c
+@@ -694,7 +694,7 @@ static const struct Qdisc_class_ops fq_c
.walk = fq_codel_walk,
};
@@ -22,7 +22,7 @@
.cl_ops = &fq_codel_class_ops,
.id = "fq_codel",
.priv_size = sizeof(struct fq_codel_sched_data),
-@@ -704,6 +704,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
+@@ -710,6 +710,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};
@@ -0,0 +1,86 @@
From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Date: Mon, 21 Aug 2017 11:14:14 +0300
Subject: [PATCH] net_sched/codel: do not defer queue length update

When codel wants to drop last packet in ->dequeue() it cannot call
qdisc_tree_reduce_backlog() right away - it will notify parent qdisc
about zero qlen and HTB/HFSC will deactivate class. The same class will
be deactivated second time by caller of ->dequeue(). Currently codel and
fq_codel defer update. This triggers warning in HFSC when it's qlen != 0
but there is no active classes.

This patch update parent queue length immediately: just temporary increase
qlen around qdisc_tree_reduce_backlog() to prevent first class deactivation
if we have skb to return.

This might open another problem in HFSC - now operation peek could fail and
deactivate parent class.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581
---

--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -95,11 +95,17 @@ static struct sk_buff *codel_qdisc_deque
&q->stats, qdisc_pkt_len, codel_get_enqueue_time,
drop_func, dequeue_func);

- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
+ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
+ * parent class, dequeue in parent qdisc will do the same if we
+ * return skb. Temporary increment qlen if we have skb.
*/
- if (q->stats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ if (q->stats.drop_count) {
+ if (skb)
+ sch->q.qlen++;
+ qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
+ q->stats.drop_len);
+ if (skb)
+ sch->q.qlen--;
q->stats.drop_count = 0;
q->stats.drop_len = 0;
}
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -318,6 +318,21 @@ begin:
flow->dropped += q->cstats.drop_count - prev_drop_count;
flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

+ /* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
+ * parent class, dequeue in parent qdisc will do the same if we
+ * return skb. Temporary increment qlen if we have skb.
+ */
+ if (q->cstats.drop_count) {
+ if (skb)
+ sch->q.qlen++;
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ q->cstats.drop_len);
+ if (skb)
+ sch->q.qlen--;
+ q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
+ }
+
if (!skb) {
/* force a pass through old_flows to prevent starvation */
if ((head == &q->new_flows) && !list_empty(&q->old_flows))
@@ -328,15 +343,6 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
- */
- if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
- q->cstats.drop_len);
- q->cstats.drop_count = 0;
- q->cstats.drop_len = 0;
- }
return skb;
}