4e0c54bc5b
The following patches were removed because they are integrated into the upstream kernel 5.4:

* backport-5.4/047-v4.21-mtd-keep-original-flags-for-every-struct-mtd_info.patch
* backport-5.4/048-v4.21-mtd-improve-calculating-partition-boundaries-when-ch.patch
* backport-5.4/080-v5.1-0001-bcma-keep-a-direct-pointer-to-the-struct-device.patch
* backport-5.4/080-v5.1-0002-bcma-use-dev_-printing-functions.patch
* backport-5.4/095-Allow-class-e-address-assignment-via-ifconfig-ioctl.patch
* backport-5.4/101-arm-cns3xxx-use-actual-size-reads-for-PCIe.patch
* backport-5.4/200-v5.2-usb-dwc2-Set-lpm-mode-parameters-depend-on-HW-configuration.patch
* backport-5.4/210-arm64-sve-Disentangle-uapi-asm-ptrace.h-from-uapi-as.patch
* backport-5.4/380-v5.3-net-sched-Introduce-act_ctinfo-action.patch
* backport-5.4/450-v5.0-mtd-spinand-winbond-Add-support-for-W25N01GV.patch
* backport-5.4/451-v5.0-mtd-spinand-Add-initial-support-for-Toshiba-TC58CVG2.patch
* backport-5.4/452-v5.0-mtd-spinand-add-support-for-GigaDevice-GD5FxGQ4xA.patch
* backport-5.4/455-v5.1-mtd-spinand-Add-support-for-all-Toshiba-Memory-produ.patch
* backport-5.4/456-v5.1-mtd-spinand-Add-support-for-GigaDevice-GD5F1GQ4UExxG.patch
* backport-5.4/460-v5.0-mtd-spi-nor-Add-support-for-mx25u12835f.patch
* backport-5.4/460-v5.3-mtd-spinand-Define-macros-for-page-read-ops-with-thr.patch
* backport-5.4/461-v5.3-mtd-spinand-Add-support-for-two-byte-device-IDs.patch
* backport-5.4/462-v5.3-mtd-spinand-Add-support-for-GigaDevice-GD5F1GQ4UFxxG.patch
* backport-5.4/463-v5.3-mtd-spinand-Add-initial-support-for-Paragon-PN26G0xA.patch
* backport-5.4/700-v5.1-net-phylink-only-call-mac_config-during-resolve-when.patch
* backport-5.4/701-v5.2-net-phylink-ensure-inband-AN-works-correctly.patch
* backport-5.4/702-v4.20-net-ethernet-Add-helper-for-MACs-which-support-asym-.patch
* backport-5.4/703-v4.20-net-ethernet-Add-helper-for-set_pauseparam-for-Asym-.patch
* backport-5.4/704-v4.20-net-phy-Stop-with-excessive-soft-reset.patch
* backport-5.4/705-v5.1-net-phy-provide-full-set-of-accessor-functions-to-MM.patch
* backport-5.4/706-v5.1-net-phy-add-register-modifying-helpers-returning-1-o.patch
* backport-5.4/707-v5.1-net-phy-add-genphy_c45_check_and_restart_aneg.patch
* backport-5.4/708-v5.3-net-phylink-remove-netdev-from-phylink-mii-ioctl-emu.patch
* backport-5.4/709-v5.3-net-phylink-support-for-link-gpio-interrupt.patch
* backport-5.4/710-v5.3-net-phy-allow-Clause-45-access-via-mii-ioctl.patch
* backport-5.4/711-v5.3-net-sfp-add-mandatory-attach-detach-methods-for-sfp-.patch
* backport-5.4/712-v5.3-net-sfp-remove-sfp-bus-use-of-netdevs.patch
* backport-5.4/713-v5.2-net-phylink-avoid-reducing-support-mask.patch
* backport-5.4/714-v5.3-net-sfp-Stop-SFP-polling-and-interrupt-handling-duri.patch
* backport-5.4/715-v5.3-net-phylink-don-t-start-and-stop-SGMII-PHYs-in-SFP-m.patch
* backport-5.4/740-v5.5-net-phy-avoid-matching-all-ones-clause-45-PHY-IDs.patch
* backport-5.4/741-v5.5-net-phylink-fix-link-mode-modification-in-PHY-mode.patch
* pending-5.4/103-MIPS-perf-ath79-Fix-perfcount-IRQ-assignment.patch
* pending-5.4/131-spi-use-gpio_set_value_cansleep-for-setting-chipsele.patch
* pending-5.4/132-spi-spi-gpio-fix-crash-when-num-chipselects-is-0.patch
* pending-5.4/220-optimize_inlining.patch
* pending-5.4/341-MIPS-mm-remove-no-op-dma_map_ops-where-possible.patch
* pending-5.4/475-mtd-spi-nor-Add-Winbond-w25q128jv-support.patch
* pending-5.4/477-mtd-add-spi-nor-add-mx25u3235f.patch
* pending-5.4/479-mtd-spi-nor-add-eon-en25qh64.patch

Some bigger changes were done to this feature upstream and we did not port this patch yet:

* hack-5.4/207-disable-modorder.patch

This depends on BOOTMEM, which was removed from the kernel; it needs some bigger changes:

* hack-5.4/930-crashlog.patch

A different version of the FPU disable patch was merged upstream; OpenWrt needs some adaptations:

* pending-5.4/304-mips_disable_fpu.patch

- No crashlog support yet, as a required file got deleted upstream
- Removed patch below, which is now seen as a recursive dependency [1]
- Removed patch below due to a build error [2]
- Fix still required to avoid an identical function definition [3]
- Fixes included from Blocktrron
- Fixes included from Chunkeey
- Fix included from nbd regarding "dst leak in Flow Offload"

[1] target/linux/generic/hack-5.4/260-crypto_test_dependencies.patch
[2] target/linux/generic/hack-5.4/207-disable-modorder.patch
[3] target/linux/generic/pending-5.4/613-netfilter_optional_tcp_window_check.patch

Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
Signed-off-by: David Bauer <mail@david-bauer.net>
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
Signed-off-by: Robert Marko <robimarko@gmail.com>
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
From b531d492d5ef1cf9dba0f4888eb5fd8624a6d762 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 7 Jul 2017 17:23:42 +0200
Subject: net: sched: switch default qdisc from pfifo_fast to fq_codel and remove pfifo_fast

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 net/sched/sch_generic.c | 140 ------------------------------------------------
 1 file changed, 140 deletions(-)

--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -594,211 +594,6 @@ struct Qdisc_ops noqueue_qdisc_ops __rea
 	.owner = THIS_MODULE,
 };
 
-static const u8 prio2band[TC_PRIO_MAX + 1] = {
-	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
-};
-
-/* 3-band FIFO queue: old style, but should be a bit faster than
-   generic prio+fifo combination.
- */
-
-#define PFIFO_FAST_BANDS 3
-
-/*
- * Private data for a pfifo_fast scheduler containing:
- *	- rings for priority bands
- */
-struct pfifo_fast_priv {
-	struct skb_array q[PFIFO_FAST_BANDS];
-};
-
-static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
-					  int band)
-{
-	return &priv->q[band];
-}
-
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
-			      struct sk_buff **to_free)
-{
-	int band = prio2band[skb->priority & TC_PRIO_MAX];
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	struct skb_array *q = band2list(priv, band);
-	unsigned int pkt_len = qdisc_pkt_len(skb);
-	int err;
-
-	err = skb_array_produce(q, skb);
-
-	if (unlikely(err)) {
-		if (qdisc_is_percpu_stats(qdisc))
-			return qdisc_drop_cpu(skb, qdisc, to_free);
-		else
-			return qdisc_drop(skb, qdisc, to_free);
-	}
-
-	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
-	return NET_XMIT_SUCCESS;
-}
-
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	struct sk_buff *skb = NULL;
-	int band;
-
-	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
-		struct skb_array *q = band2list(priv, band);
-
-		if (__skb_array_empty(q))
-			continue;
-
-		skb = __skb_array_consume(q);
-	}
-	if (likely(skb)) {
-		qdisc_update_stats_at_dequeue(qdisc, skb);
-	} else {
-		WRITE_ONCE(qdisc->empty, true);
-	}
-
-	return skb;
-}
-
-static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	struct sk_buff *skb = NULL;
-	int band;
-
-	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
-		struct skb_array *q = band2list(priv, band);
-
-		skb = __skb_array_peek(q);
-	}
-
-	return skb;
-}
-
-static void pfifo_fast_reset(struct Qdisc *qdisc)
-{
-	int i, band;
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-
-	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
-		struct skb_array *q = band2list(priv, band);
-		struct sk_buff *skb;
-
-		/* NULL ring is possible if destroy path is due to a failed
-		 * skb_array_init() in pfifo_fast_init() case.
-		 */
-		if (!q->ring.queue)
-			continue;
-
-		while ((skb = __skb_array_consume(q)) != NULL)
-			kfree_skb(skb);
-	}
-
-	if (qdisc_is_percpu_stats(qdisc)) {
-		for_each_possible_cpu(i) {
-			struct gnet_stats_queue *q;
-
-			q = per_cpu_ptr(qdisc->cpu_qstats, i);
-			q->backlog = 0;
-			q->qlen = 0;
-		}
-	}
-}
-
-static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
-{
-	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
-
-	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
-	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
-		goto nla_put_failure;
-	return skb->len;
-
-nla_put_failure:
-	return -1;
-}
-
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
-			   struct netlink_ext_ack *extack)
-{
-	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int prio;
-
-	/* guard against zero length rings */
-	if (!qlen)
-		return -EINVAL;
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		struct skb_array *q = band2list(priv, prio);
-		int err;
-
-		err = skb_array_init(q, qlen, GFP_KERNEL);
-		if (err)
-			return -ENOMEM;
-	}
-
-	/* Can by-pass the queue discipline */
-	qdisc->flags |= TCQ_F_CAN_BYPASS;
-	return 0;
-}
-
-static void pfifo_fast_destroy(struct Qdisc *sch)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(sch);
-	int prio;
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		struct skb_array *q = band2list(priv, prio);
-
-		/* NULL ring is possible if destroy path is due to a failed
-		 * skb_array_init() in pfifo_fast_init() case.
-		 */
-		if (!q->ring.queue)
-			continue;
-		/* Destroy ring but no need to kfree_skb because a call to
-		 * pfifo_fast_reset() has already done that work.
-		 */
-		ptr_ring_cleanup(&q->ring, NULL);
-	}
-}
-
-static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
-					  unsigned int new_len)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(sch);
-	struct skb_array *bands[PFIFO_FAST_BANDS];
-	int prio;
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		struct skb_array *q = band2list(priv, prio);
-
-		bands[prio] = q;
-	}
-
-	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
-					 GFP_KERNEL);
-}
-
-struct Qdisc_ops pfifo_fast_ops __read_mostly = {
-	.id = "pfifo_fast",
-	.priv_size = sizeof(struct pfifo_fast_priv),
-	.enqueue = pfifo_fast_enqueue,
-	.dequeue = pfifo_fast_dequeue,
-	.peek = pfifo_fast_peek,
-	.init = pfifo_fast_init,
-	.destroy = pfifo_fast_destroy,
-	.reset = pfifo_fast_reset,
-	.dump = pfifo_fast_dump,
-	.change_tx_queue_len = pfifo_fast_change_tx_queue_len,
-	.owner = THIS_MODULE,
-	.static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
-};
-EXPORT_SYMBOL(pfifo_fast_ops);
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack)
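
Note that the diff above only deletes the pfifo_fast implementation; actually making fq_codel the default is handled by a companion OpenWrt change that repoints the kernel's fallback qdisc ops in net/sched/sch_generic.c. A minimal sketch of that idea follows, assuming fq_codel's Qdisc_ops struct is made non-static and exported under the name fq_codel_qdisc_ops (that symbol name and placement are assumptions for illustration, not part of this patch):

/* Sketch only: with pfifo_fast_ops removed, the kernel's default qdisc
 * pointer in net/sched/sch_generic.c has to reference another queue
 * discipline. fq_codel_qdisc_ops is assumed to be exported by
 * net/sched/sch_fq_codel.c in a companion patch.
 */
extern struct Qdisc_ops fq_codel_qdisc_ops;

const struct Qdisc_ops *default_qdisc_ops = &fq_codel_qdisc_ops;
EXPORT_SYMBOL(default_qdisc_ops);

At runtime the chosen default can still be inspected or overridden through the net.core.default_qdisc sysctl or by attaching a qdisc explicitly with tc.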