From b531d492d5ef1cf9dba0f4888eb5fd8624a6d762 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 7 Jul 2017 17:23:42 +0200
Subject: net: sched: switch default qdisc from pfifo_fast to fq_codel and remove pfifo_fast
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
net/sched/sch_generic.c | 140 ------------------------------------------------
1 file changed, 140 deletions(-)
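
Reference note (not part of the patch): the hunk below only deletes the pfifo_fast
implementation. As a reminder of what is going away, here is a small standalone C
sketch of the band-selection logic pfifo_fast_enqueue() used, with the prio2band
table copied from the code removed below; the sample priority values are made up
purely for illustration.

#include <stdio.h>

/* Copy of the priority map deleted below: TC_PRIO_* values (0..15)
 * are folded into band 0 (highest), 1 or 2 (lowest).
 */
#define TC_PRIO_MAX 15

static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

int main(void)
{
	/* Hypothetical skb->priority values, for illustration only. */
	unsigned int prio[] = { 0, 2, 6, 17 };
	unsigned int i;

	for (i = 0; i < sizeof(prio) / sizeof(prio[0]); i++) {
		/* Same masking as pfifo_fast_enqueue() used: out-of-range
		 * priorities wrap back into the 16-entry table.
		 */
		int band = prio2band[prio[i] & TC_PRIO_MAX];

		printf("priority %u -> band %d\n", prio[i], band);
	}
	return 0;
}
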
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -595,211 +595,6 @@ struct Qdisc_ops noqueue_qdisc_ops __rea
.owner = THIS_MODULE,
};
-static const u8 prio2band[TC_PRIO_MAX + 1] = {
- 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
-};
-
-/* 3-band FIFO queue: old style, but should be a bit faster than
- generic prio+fifo combination.
- */
-
-#define PFIFO_FAST_BANDS 3
-
-/*
- * Private data for a pfifo_fast scheduler containing:
- * - rings for priority bands
- */
-struct pfifo_fast_priv {
- struct skb_array q[PFIFO_FAST_BANDS];
-};
-
-static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
- int band)
-{
- return &priv->q[band];
-}
-
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
- struct sk_buff **to_free)
-{
- int band = prio2band[skb->priority & TC_PRIO_MAX];
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct skb_array *q = band2list(priv, band);
- unsigned int pkt_len = qdisc_pkt_len(skb);
- int err;
-
- err = skb_array_produce(q, skb);
-
- if (unlikely(err)) {
- if (qdisc_is_percpu_stats(qdisc))
- return qdisc_drop_cpu(skb, qdisc, to_free);
- else
- return qdisc_drop(skb, qdisc, to_free);
- }
-
- qdisc_update_stats_at_enqueue(qdisc, pkt_len);
- return NET_XMIT_SUCCESS;
-}
-
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff *skb = NULL;
- int band;
-
- for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
- struct skb_array *q = band2list(priv, band);
-
- if (__skb_array_empty(q))
- continue;
-
- skb = __skb_array_consume(q);
- }
- if (likely(skb)) {
- qdisc_update_stats_at_dequeue(qdisc, skb);
- } else {
- WRITE_ONCE(qdisc->empty, true);
- }
-
- return skb;
-}
-
-static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff *skb = NULL;
- int band;
-
- for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
- struct skb_array *q = band2list(priv, band);
-
- skb = __skb_array_peek(q);
- }
-
- return skb;
-}
-
-static void pfifo_fast_reset(struct Qdisc *qdisc)
-{
- int i, band;
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-
- for (band = 0; band < PFIFO_FAST_BANDS; band++) {
- struct skb_array *q = band2list(priv, band);
- struct sk_buff *skb;
-
- /* NULL ring is possible if destroy path is due to a failed
- * skb_array_init() in pfifo_fast_init() case.
- */
- if (!q->ring.queue)
- continue;
-
- while ((skb = __skb_array_consume(q)) != NULL)
- kfree_skb(skb);
- }
-
- if (qdisc_is_percpu_stats(qdisc)) {
- for_each_possible_cpu(i) {
- struct gnet_stats_queue *q;
-
- q = per_cpu_ptr(qdisc->cpu_qstats, i);
- q->backlog = 0;
- q->qlen = 0;
- }
- }
-}
-
-static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
-{
- struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
-
- memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
- if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- return -1;
-}
-
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- int prio;
-
- /* guard against zero length rings */
- if (!qlen)
- return -EINVAL;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
- int err;
-
- err = skb_array_init(q, qlen, GFP_KERNEL);
- if (err)
- return -ENOMEM;
- }
-
- /* Can by-pass the queue discipline */
- qdisc->flags |= TCQ_F_CAN_BYPASS;
- return 0;
-}
-
-static void pfifo_fast_destroy(struct Qdisc *sch)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(sch);
- int prio;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
-
- /* NULL ring is possible if destroy path is due to a failed
- * skb_array_init() in pfifo_fast_init() case.
- */
- if (!q->ring.queue)
- continue;
- /* Destroy ring but no need to kfree_skb because a call to
- * pfifo_fast_reset() has already done that work.
- */
- ptr_ring_cleanup(&q->ring, NULL);
- }
-}
-
-static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
- unsigned int new_len)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(sch);
- struct skb_array *bands[PFIFO_FAST_BANDS];
- int prio;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
-
- bands[prio] = q;
- }
-
- return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
- GFP_KERNEL);
-}
-
-struct Qdisc_ops pfifo_fast_ops __read_mostly = {
- .id = "pfifo_fast",
- .priv_size = sizeof(struct pfifo_fast_priv),
- .enqueue = pfifo_fast_enqueue,
- .dequeue = pfifo_fast_dequeue,
- .peek = pfifo_fast_peek,
- .init = pfifo_fast_init,
- .destroy = pfifo_fast_destroy,
- .reset = pfifo_fast_reset,
- .dump = pfifo_fast_dump,
- .change_tx_queue_len = pfifo_fast_change_tx_queue_len,
- .owner = THIS_MODULE,
- .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
-};
-EXPORT_SYMBOL(pfifo_fast_ops);
-
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack)