From b531d492d5ef1cf9dba0f4888eb5fd8624a6d762 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 7 Jul 2017 17:23:42 +0200
Subject: net: sched: switch default qdisc from pfifo_fast to fq_codel and remove pfifo_fast

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 net/sched/sch_generic.c | 140 ------------------------------------------------
 1 file changed, 140 deletions(-)

--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -613,207 +613,6 @@ struct Qdisc_ops noqueue_qdisc_ops __rea
 .owner = THIS_MODULE,
 };

-static const u8 prio2band[TC_PRIO_MAX + 1] = {
- 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
-};
-
-/* 3-band FIFO queue: old style, but should be a bit faster than
- generic prio+fifo combination.
- */
-
-#define PFIFO_FAST_BANDS 3
-
-/*
- * Private data for a pfifo_fast scheduler containing:
- * - rings for priority bands
- */
-struct pfifo_fast_priv {
- struct skb_array q[PFIFO_FAST_BANDS];
-};
-
-static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
- int band)
-{
- return &priv->q[band];
-}
-
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
- struct sk_buff **to_free)
-{
- int band = prio2band[skb->priority & TC_PRIO_MAX];
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct skb_array *q = band2list(priv, band);
- unsigned int pkt_len = qdisc_pkt_len(skb);
- int err;
-
- err = skb_array_produce(q, skb);
-
- if (unlikely(err))
- return qdisc_drop_cpu(skb, qdisc, to_free);
-
- qdisc_qstats_atomic_qlen_inc(qdisc);
- /* Note: skb can not be used after skb_array_produce(),
- * so we better not use qdisc_qstats_cpu_backlog_inc()
- */
- this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
- return NET_XMIT_SUCCESS;
-}
-
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff *skb = NULL;
- int band;
-
- for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
- struct skb_array *q = band2list(priv, band);
-
- if (__skb_array_empty(q))
- continue;
-
- skb = __skb_array_consume(q);
- }
- if (likely(skb)) {
- qdisc_qstats_cpu_backlog_dec(qdisc, skb);
- qdisc_bstats_cpu_update(qdisc, skb);
- qdisc_qstats_atomic_qlen_dec(qdisc);
- }
-
- return skb;
-}
-
-static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff *skb = NULL;
- int band;
-
- for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
- struct skb_array *q = band2list(priv, band);
-
- skb = __skb_array_peek(q);
- }
-
- return skb;
-}
-
-static void pfifo_fast_reset(struct Qdisc *qdisc)
-{
- int i, band;
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-
- for (band = 0; band < PFIFO_FAST_BANDS; band++) {
- struct skb_array *q = band2list(priv, band);
- struct sk_buff *skb;
-
- /* NULL ring is possible if destroy path is due to a failed
- * skb_array_init() in pfifo_fast_init() case.
- */
- if (!q->ring.queue)
- continue;
-
- while ((skb = __skb_array_consume(q)) != NULL)
- kfree_skb(skb);
- }
-
- for_each_possible_cpu(i) {
- struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
-
- q->backlog = 0;
- }
-}
-
-static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
-{
- struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
-
- memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
- if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
- goto nla_put_failure;
- return skb->len;
-
-nla_put_failure:
- return -1;
-}
-
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
- struct netlink_ext_ack *extack)
-{
- unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
- struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- int prio;
-
- /* guard against zero length rings */
- if (!qlen)
- return -EINVAL;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
- int err;
-
- err = skb_array_init(q, qlen, GFP_KERNEL);
- if (err)
- return -ENOMEM;
- }
-
- /* Can by-pass the queue discipline */
- qdisc->flags |= TCQ_F_CAN_BYPASS;
- return 0;
-}
-
-static void pfifo_fast_destroy(struct Qdisc *sch)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(sch);
- int prio;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
-
- /* NULL ring is possible if destroy path is due to a failed
- * skb_array_init() in pfifo_fast_init() case.
- */
- if (!q->ring.queue)
- continue;
- /* Destroy ring but no need to kfree_skb because a call to
- * pfifo_fast_reset() has already done that work.
- */
- ptr_ring_cleanup(&q->ring, NULL);
- }
-}
-
-static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
- unsigned int new_len)
-{
- struct pfifo_fast_priv *priv = qdisc_priv(sch);
- struct skb_array *bands[PFIFO_FAST_BANDS];
- int prio;
-
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- struct skb_array *q = band2list(priv, prio);
-
- bands[prio] = q;
- }
-
- return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
- GFP_KERNEL);
-}
-
-struct Qdisc_ops pfifo_fast_ops __read_mostly = {
- .id = "pfifo_fast",
- .priv_size = sizeof(struct pfifo_fast_priv),
- .enqueue = pfifo_fast_enqueue,
- .dequeue = pfifo_fast_dequeue,
- .peek = pfifo_fast_peek,
- .init = pfifo_fast_init,
- .destroy = pfifo_fast_destroy,
- .reset = pfifo_fast_reset,
- .dump = pfifo_fast_dump,
- .change_tx_queue_len = pfifo_fast_change_tx_queue_len,
- .owner = THIS_MODULE,
- .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
-};
-EXPORT_SYMBOL(pfifo_fast_ops);
-
 static struct lock_class_key qdisc_tx_busylock;
 static struct lock_class_key qdisc_running_key;