88ba41453d
Refresh patches. Remove upstreamed patches: - backport/080-v4.15-0001-arch-define-weak-abort.patch - backport/081-v4.15-0002-kernel-exit.c-export-abort-to-modules.patch Update patch that no longer applies: pending/493-ubi-set-ROOT_DEV-to-ubiblock-rootfs-if-unset.patch Fixes CVE-2017-8824. Compile-tested: octeon, x86/64. Runtime-tested: octeon, x86/64. Signed-off-by: Stijn Tintel <stijn@linux-ipv6.be>
93 lines
2.8 KiB
Diff
93 lines
2.8 KiB
Diff
From f974e397b806f7b16d11cc1542538616291924f1 Mon Sep 17 00:00:00 2001
|
|
From: John Crispin <john@phrozen.org>
|
|
Date: Sat, 23 Apr 2016 11:57:21 +0200
|
|
Subject: [PATCH 27/57] net-next: mediatek: fix DQL support
|
|
|
|
The MTK ethernet core has 2 MACs both sitting on the same DMA ring. The
|
|
current code will assign the TX traffic of each MAC to its own DQL. This
|
|
results in the amount of data that DQL says is in the queue being incorrect, as
|
|
the data from multiple devices is in fact enqueued. This makes any decision
|
|
based on these values non-deterministic. Fix this by tracking all TX
|
|
traffic, regardless of the MAC it belongs to in the DQL of all devices
|
|
using the DMA.
|
|
|
|
Signed-off-by: John Crispin <john@phrozen.org>
|
|
---
|
|
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 35 +++++++++++++++++------------
|
|
1 file changed, 21 insertions(+), 14 deletions(-)
|
|
|
|
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
@@ -779,7 +779,16 @@ static int mtk_tx_map(struct sk_buff *sk
|
|
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
|
|
(!nr_frags * TX_DMA_LS0)));
|
|
|
|
- netdev_sent_queue(dev, skb->len);
|
|
+ /* we have a single DMA ring so BQL needs to be updated for all devices
|
|
+ * sitting on this ring
|
|
+ */
|
|
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
|
|
+ if (!eth->netdev[i])
|
|
+ continue;
|
|
+
|
|
+ netdev_sent_queue(eth->netdev[i], skb->len);
|
|
+ }
|
|
+
|
|
skb_tx_timestamp(skb);
|
|
|
|
ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
|
|
@@ -1076,20 +1085,17 @@ static int mtk_poll_tx(struct mtk_eth *e
|
|
struct mtk_tx_dma *desc;
|
|
struct sk_buff *skb;
|
|
struct mtk_tx_buf *tx_buf;
|
|
- unsigned int done[MTK_MAX_DEVS];
|
|
- unsigned int bytes[MTK_MAX_DEVS];
|
|
+ int total = 0, done = 0;
|
|
+ unsigned int bytes = 0;
|
|
u32 cpu, dma;
|
|
- int total = 0, i;
|
|
-
|
|
- memset(done, 0, sizeof(done));
|
|
- memset(bytes, 0, sizeof(bytes));
|
|
+ int i;
|
|
|
|
cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
|
|
dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
|
|
|
|
desc = mtk_qdma_phys_to_virt(ring, cpu);
|
|
|
|
- while ((cpu != dma) && budget) {
|
|
+ while ((cpu != dma) && (done < budget)) {
|
|
u32 next_cpu = desc->txd2;
|
|
int mac = 0;
|
|
|
|
@@ -1106,9 +1112,8 @@ static int mtk_poll_tx(struct mtk_eth *e
|
|
break;
|
|
|
|
if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
|
|
- bytes[mac] += skb->len;
|
|
- done[mac]++;
|
|
- budget--;
|
|
+ bytes += skb->len;
|
|
+ done++;
|
|
}
|
|
mtk_tx_unmap(eth, tx_buf);
|
|
|
|
@@ -1120,11 +1125,13 @@ static int mtk_poll_tx(struct mtk_eth *e
|
|
|
|
mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
|
|
|
|
+ /* we have a single DMA ring so BQL needs to be updated for all devices
|
|
+ * sitting on this ring
|
|
+ */
|
|
for (i = 0; i < MTK_MAC_COUNT; i++) {
|
|
- if (!eth->netdev[i] || !done[i])
|
|
+ if (!eth->netdev[i])
|
|
continue;
|
|
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
|
|
- total += done[i];
|
|
+ netdev_completed_queue(eth->netdev[i], done, bytes);
|
|
}
|
|
|
|
if (mtk_queue_stopped(eth) &&
|