kernel: bump 4.9 to 4.9.57

Refresh patches.
Compile-tested for ar71xx - Archer C7 v2
Runtime-tested on ar71xx - Archer C7 v2

Fixes the following CVEs:

- CVE-2017-7518
- CVE-2017-0786
- CVE-2017-1000255
- CVE-2017-12188
- CVE-2017-15265

Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>

@@ -4,11 +4,11 @@ LINUX_RELEASE?=1
LINUX_VERSION-3.18 = .71
LINUX_VERSION-4.4 = .92
-LINUX_VERSION-4.9 = .54
+LINUX_VERSION-4.9 = .57
LINUX_KERNEL_HASH-3.18.71 = 5abc9778ad44ce02ed6c8ab52ece8a21c6d20d21f6ed8a19287b4a38a50c1240
LINUX_KERNEL_HASH-4.4.92 = 53f8cd8b024444df0f242f8e6ab5147b0b009d7a30e8b2ed3854e8d17937460d
-LINUX_KERNEL_HASH-4.9.54 = 651005db6efbce4fcd607415ebd697dd8d2f5a2abc2c632b11ece03a1a210fc5
+LINUX_KERNEL_HASH-4.9.57 = 09230554ec6a34a12e2d2a6b32733aed3c9bc90b1662cc1b06dd67bf726c96a6
ifdef KERNEL_PATCHVER
LINUX_VERSION:=$(KERNEL_PATCHVER)$(strip $(LINUX_VERSION-$(KERNEL_PATCHVER)))
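
For reference (illustrative, not part of the commit): with a target profile that sets KERNEL_PATCHVER to 4.9, the composition rule above now resolves the full version string to 4.9.57. A minimal GNU make sketch of that expansion, using only the values from this hunk:

# standalone sketch; mirrors the LINUX_VERSION composition shown above
KERNEL_PATCHVER := 4.9
LINUX_VERSION-4.9 = .57
LINUX_VERSION := $(KERNEL_PATCHVER)$(strip $(LINUX_VERSION-$(KERNEL_PATCHVER)))
$(info LINUX_VERSION = $(LINUX_VERSION))   # prints: LINUX_VERSION = 4.9.57
all: ;

The remaining hunks below are the mechanical patch refresh against 4.9.57 (regenerated hunk offsets and patch headers), as typically produced by make target/linux/refresh in the buildroot.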

@ -44,7 +44,7 @@ Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
#include "xhci.h"
#include "xhci-trace.h"
@@ -248,6 +250,458 @@ static void xhci_pme_acpi_rtd3_enable(st
@@ -236,6 +238,458 @@ static void xhci_pme_acpi_rtd3_enable(st
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
#endif /* CONFIG_ACPI */
@ -503,7 +503,7 @@ Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
@@ -287,6 +741,22 @@ static int xhci_pci_probe(struct pci_dev
@@ -275,6 +729,22 @@ static int xhci_pci_probe(struct pci_dev
struct hc_driver *driver;
struct usb_hcd *hcd;
@ -526,7 +526,7 @@ Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
driver = (struct hc_driver *)id->driver_data;
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
@@ -344,6 +814,16 @@ static void xhci_pci_remove(struct pci_d
@@ -332,6 +802,16 @@ static void xhci_pci_remove(struct pci_d
{
struct xhci_hcd *xhci;

@ -13,7 +13,7 @@ produce a noisy warning.
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -205,7 +205,7 @@ static void xhci_pci_quirks(struct devic
@@ -193,7 +193,7 @@ static void xhci_pci_quirks(struct devic
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015)

@ -129,7 +129,7 @@ it on BCM4708 family.
+++ b/drivers/usb/host/xhci.h
@@ -1662,6 +1662,7 @@ struct xhci_hcd {
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
#define XHCI_U2_DISABLE_WAKE (1 << 27)
/* Reserved. It was XHCI_U2_DISABLE_WAKE */
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
+#define XHCI_FAKE_DOORBELL (1 << 29)

@ -19,7 +19,7 @@ Reduces overhead when using X
module_param_named(mousepoll, hid_mousepoll_interval, uint, 0644);
MODULE_PARM_DESC(mousepoll, "Polling interval of mice");
@@ -1083,8 +1083,12 @@ static int usbhid_start(struct hid_devic
@@ -1093,8 +1093,12 @@ static int usbhid_start(struct hid_devic
}
/* Change the polling interval of mice. */

@ -21,7 +21,7 @@ Signed-off-by: Eric Anholt <eric@anholt.net>
}
kfree(pagelist);
if (actual_pages == 0)
@@ -577,7 +577,7 @@ free_pagelist(PAGELIST_T *pagelist, int
@@ -579,7 +579,7 @@ free_pagelist(PAGELIST_T *pagelist, int
offset = 0;
set_page_dirty(pg);
}

@ -1,6 +1,6 @@
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -414,6 +414,11 @@ static int bcm5481_config_aneg(struct ph
@@ -420,6 +420,11 @@ static int bcm5481_config_aneg(struct ph
/* Write bits 14:0. */
reg |= (1 << 15);
phy_write(phydev, 0x18, reg);

@ -60,7 +60,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
nval = cmpxchg(&tp->tsq_flags, oval, nval);
if (nval != oval)
continue;
@@ -2179,6 +2179,8 @@ static bool tcp_write_xmit(struct sock *
@@ -2182,6 +2182,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;

@ -19,7 +19,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2084,6 +2084,15 @@ static bool tcp_small_queue_check(struct
@@ -2087,6 +2087,15 @@ static bool tcp_small_queue_check(struct
limit <<= factor;
if (atomic_read(&sk->sk_wmem_alloc) > limit) {

@ -17,7 +17,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1925,26 +1925,26 @@ static inline void tcp_mtu_check_reprobe
@@ -1928,26 +1928,26 @@ static inline void tcp_mtu_check_reprobe
*/
static int tcp_mtu_probe(struct sock *sk)
{

@ -114,7 +114,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (nval != oval)
continue;
@@ -2093,7 +2093,7 @@ static bool tcp_small_queue_check(struct
@@ -2096,7 +2096,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
@ -123,7 +123,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
@@ -2191,8 +2191,8 @@ static bool tcp_write_xmit(struct sock *
@@ -2194,8 +2194,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
@ -134,7 +134,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (tcp_small_queue_check(sk, skb, 0))
break;
@@ -3495,8 +3495,6 @@ void tcp_send_ack(struct sock *sk)
@@ -3508,8 +3508,6 @@ void tcp_send_ack(struct sock *sk)
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784

@ -83,7 +83,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
EXPORT_SYMBOL(default_qdisc_ops);
/* Main transmission queue. */
@@ -759,7 +759,7 @@ static void attach_one_default_qdisc(str
@@ -760,7 +760,7 @@ static void attach_one_default_qdisc(str
void *_unused)
{
struct Qdisc *qdisc;

@ -327,7 +327,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3082,6 +3082,8 @@ static __net_initdata struct pernet_oper
@@ -3084,6 +3084,8 @@ static __net_initdata struct pernet_oper
static int __init proto_init(void)
{

@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#define PACKET_FANOUT_LB 1
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1772,6 +1772,7 @@ static int packet_rcv_spkt(struct sk_buf
@@ -1778,6 +1778,7 @@ static int packet_rcv_spkt(struct sk_buf
{
struct sock *sk;
struct sockaddr_pkt *spkt;
@ -38,7 +38,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/*
* When we registered the protocol we saved the socket in the data
@@ -1779,6 +1780,7 @@ static int packet_rcv_spkt(struct sk_buf
@@ -1785,6 +1786,7 @@ static int packet_rcv_spkt(struct sk_buf
*/
sk = pt->af_packet_priv;
@ -46,7 +46,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/*
* Yank back the headers [hope the device set this
@@ -1791,7 +1793,7 @@ static int packet_rcv_spkt(struct sk_buf
@@ -1797,7 +1799,7 @@ static int packet_rcv_spkt(struct sk_buf
* so that this procedure is noop.
*/
@ -55,7 +55,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
goto out;
if (!net_eq(dev_net(dev), sock_net(sk)))
@@ -2029,12 +2031,12 @@ static int packet_rcv(struct sk_buff *sk
@@ -2035,12 +2037,12 @@ static int packet_rcv(struct sk_buff *sk
unsigned int snaplen, res;
bool is_drop_n_account = false;
@ -71,7 +71,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
@@ -2160,12 +2162,12 @@ static int tpacket_rcv(struct sk_buff *s
@@ -2166,12 +2168,12 @@ static int tpacket_rcv(struct sk_buff *s
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
@ -87,7 +87,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
@@ -3240,6 +3242,7 @@ static int packet_create(struct net *net
@@ -3250,6 +3252,7 @@ static int packet_create(struct net *net
mutex_init(&po->pg_vec_lock);
po->rollover = NULL;
po->prot_hook.func = packet_rcv;
@ -95,7 +95,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
@@ -3826,6 +3829,16 @@ packet_setsockopt(struct socket *sock, i
@@ -3836,6 +3839,16 @@ packet_setsockopt(struct socket *sock, i
po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
return 0;
}
@ -112,7 +112,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
default:
return -ENOPROTOOPT;
}
@@ -3878,6 +3891,13 @@ static int packet_getsockopt(struct sock
@@ -3888,6 +3901,13 @@ static int packet_getsockopt(struct sock
case PACKET_VNET_HDR:
val = po->has_vnet_hdr;
break;

@ -300,7 +300,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
/**
* ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
* @t: the outgoing tunnel device
@@ -1285,6 +1425,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
@@ -1286,6 +1426,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
{
struct ip6_tnl *t = netdev_priv(dev);
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
@ -308,7 +308,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
@@ -1343,6 +1484,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
@@ -1344,6 +1485,18 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, str
fl6.flowi6_mark = skb->mark;
}
@ -327,7 +327,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
@@ -1470,6 +1623,14 @@ ip6_tnl_change(struct ip6_tnl *t, const
@@ -1471,6 +1624,14 @@ ip6_tnl_change(struct ip6_tnl *t, const
t->parms.flowinfo = p->flowinfo;
t->parms.link = p->link;
t->parms.proto = p->proto;
@ -342,7 +342,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
dst_cache_reset(&t->dst_cache);
ip6_tnl_link_config(t);
return 0;
@@ -1508,6 +1669,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_
@@ -1509,6 +1670,7 @@ ip6_tnl_parm_from_user(struct __ip6_tnl_
p->flowinfo = u->flowinfo;
p->link = u->link;
p->proto = u->proto;
@ -350,7 +350,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
memcpy(p->name, u->name, sizeof(u->name));
}
@@ -1885,6 +2047,15 @@ static int ip6_tnl_validate(struct nlatt
@@ -1886,6 +2048,15 @@ static int ip6_tnl_validate(struct nlatt
return 0;
}
@ -366,7 +366,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
static void ip6_tnl_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
{
@@ -1919,6 +2090,46 @@ static void ip6_tnl_netlink_parms(struct
@@ -1920,6 +2091,46 @@ static void ip6_tnl_netlink_parms(struct
if (data[IFLA_IPTUN_COLLECT_METADATA])
parms->collect_md = true;
@ -413,7 +413,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
}
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
@@ -2028,6 +2239,12 @@ static void ip6_tnl_dellink(struct net_d
@@ -2029,6 +2240,12 @@ static void ip6_tnl_dellink(struct net_d
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
@ -426,7 +426,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
return
/* IFLA_IPTUN_LINK */
nla_total_size(4) +
@@ -2055,6 +2272,24 @@ static size_t ip6_tnl_get_size(const str
@@ -2056,6 +2273,24 @@ static size_t ip6_tnl_get_size(const str
nla_total_size(2) +
/* IFLA_IPTUN_COLLECT_METADATA */
nla_total_size(0) +
@ -451,7 +451,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
0;
}
@@ -2062,6 +2297,9 @@ static int ip6_tnl_fill_info(struct sk_b
@@ -2063,6 +2298,9 @@ static int ip6_tnl_fill_info(struct sk_b
{
struct ip6_tnl *tunnel = netdev_priv(dev);
struct __ip6_tnl_parm *parm = &tunnel->parms;
@ -461,7 +461,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
@@ -2070,9 +2308,27 @@ static int ip6_tnl_fill_info(struct sk_b
@@ -2071,9 +2309,27 @@ static int ip6_tnl_fill_info(struct sk_b
nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
@ -490,7 +490,7 @@ Signed-off-by: Steven Barth <cyrus@openwrt.org>
if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
@@ -2110,6 +2366,7 @@ static const struct nla_policy ip6_tnl_p
@@ -2111,6 +2367,7 @@ static const struct nla_policy ip6_tnl_p
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },

@ -1065,7 +1065,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
static void mlx5e_set_rx_mode(struct net_device *dev)
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -948,15 +948,13 @@ out:
@@ -949,15 +949,13 @@ out:
/* Return the stats from a cache that is updated periodically,
* as this function might get called in an atomic context.
*/

@ -34,7 +34,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
return entry->group;
}
@@ -3159,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu
@@ -3160,9 +3162,10 @@ static bool amd_iommu_capable(enum iommu
return false;
}
@ -47,7 +47,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
struct unity_map_entry *entry;
int devid;
@@ -3170,41 +3173,56 @@ static void amd_iommu_get_dm_regions(str
@@ -3171,41 +3174,56 @@ static void amd_iommu_get_dm_regions(str
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
@ -118,7 +118,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
{
struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
unsigned long start, end;
@@ -3228,9 +3246,9 @@ static const struct iommu_ops amd_iommu_
@@ -3229,9 +3247,9 @@ static const struct iommu_ops amd_iommu_
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,

@@ -1,7 +1,5 @@
-Index: linux-4.9.50/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-===================================================================
---- linux-4.9.50.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ linux-4.9.50/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1533,7 +1533,10 @@ static void mtk_hwlro_rx_uninit(struct m
for (i = 0; i < 10; i++) {
val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);

@ -25,7 +25,7 @@ Signed-off-by: John Crispin <blogic@openwrt.org>
bool "SGI IP22 (Indy/Indigo2)"
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -114,6 +114,11 @@ config PINCTRL_LPC18XX
@@ -115,6 +115,11 @@ config PINCTRL_LPC18XX
help
Pinctrl driver for NXP LPC18xx/43xx System Control Unit (SCU).

@ -2887,7 +2887,7 @@
+
+ /* Display RX ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
}
+ }
+}
+
+static void stmmac_display_tx_rings(struct stmmac_priv *priv)
@ -2906,15 +2906,15 @@
+ head_tx = (void *)tx_q->dma_etx;
+ else
+ head_tx = (void *)tx_q->dma_tx;
+
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}
+}
- /* Display Rx ring */
- priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
- /* Display Tx ring */
- priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
+ }
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ /* Display RX ring */
@ -3131,7 +3131,7 @@
if (priv->hw->mode->set_16kib_bfsize)
bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
@@ -1018,235 +1228,409 @@ static int init_dma_desc_rings(struct ne
@@ -1018,257 +1228,516 @@ static int init_dma_desc_rings(struct ne
priv->dma_buf_sz = bfsize;
@ -3163,7 +3163,10 @@
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
- ret = stmmac_init_rx_buffers(priv, p, i, flags);
- if (ret)
- goto err_init_rx_buffers;
+ ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ queue);
+ if (ret)
@ -3173,18 +3176,15 @@
+ rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
+ (unsigned int)rx_q->rx_skbuff_dma[i]);
+ }
- ret = stmmac_init_rx_buffers(priv, p, i, flags);
- if (ret)
- goto err_init_rx_buffers;
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
+ stmmac_clear_rx_descriptors(priv, queue);
- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
- priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
- (unsigned int)priv->rx_skbuff_dma[i]);
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
@ -3277,8 +3277,13 @@
+ priv->hw->mode->init(tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ DMA_TX_SIZE, 0);
+ }
+
}
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- priv->tx_skbuff_dma[i].len = 0;
- priv->tx_skbuff_dma[i].last_segment = false;
- priv->tx_skbuff[i] = NULL;
+ for (i = 0; i < DMA_TX_SIZE; i++) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
@ -3300,13 +3305,8 @@
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
}
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- priv->tx_skbuff_dma[i].len = 0;
- priv->tx_skbuff_dma[i].last_segment = false;
- priv->tx_skbuff[i] = NULL;
+ }
+
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+
@ -3387,17 +3387,10 @@
- priv->tx_skbuff_dma[i].buf,
- priv->tx_skbuff_dma[i].len,
- DMA_TO_DEVICE);
- }
+ for (i = 0; i < DMA_TX_SIZE; i++)
+ stmmac_free_tx_buffer(priv, queue, i);
+}
- if (priv->tx_skbuff[i]) {
- dev_kfree_skb_any(priv->tx_skbuff[i]);
- priv->tx_skbuff[i] = NULL;
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- }
+
+/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
@ -3426,11 +3419,10 @@
+
+ kfree(rx_q->rx_skbuff_dma);
+ kfree(rx_q->rx_skbuff);
}
}
/**
- * alloc_dma_desc_resources - alloc TX/RX resources.
+ }
+}
+
+/**
+ * free_dma_tx_desc_resources - free TX dma desc resources
+ * @priv: private structure
+ */
@ -3463,90 +3455,36 @@
+
+/**
+ * alloc_dma_rx_desc_resources - alloc RX resources.
* @priv: private structure
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extend or basic)
+ * this function allocates the resources for TX and RX paths. In case of
+ * reception, for example, it pre-allocated the RX socket buffer in order to
+ * allow zero-copy mechanism.
+ */
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
int ret = -ENOMEM;
+ int ret = -ENOMEM;
+ u32 queue;
- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- return -ENOMEM;
+
+ /* RX queues buffers and DMA */
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
- sizeof(*priv->tx_skbuff_dma),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
- if (priv->extend_desc) {
- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
+
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
+
+ rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
+ sizeof(dma_addr_t),
GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
+ GFP_KERNEL);
+ if (!rx_q->rx_skbuff_dma)
+ return -ENOMEM;
- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
+
+ rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!rx_q->rx_skbuff)
goto err_dma;
+ goto err_dma;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_zalloc_coherent(priv->device,
@ -3567,19 +3505,12 @@
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ goto err_dma;
}
}
return 0;
err_dma:
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
+ }
+ }
+
+ return 0;
+
+err_dma:
+ free_dma_rx_desc_resources(priv);
+
+ return ret;
@ -3636,7 +3567,7 @@
+ GFP_KERNEL);
+ if (!tx_q->dma_tx)
+ goto err_dma_buffers;
+ }
}
+ }
+
+ return 0;
@ -3644,9 +3575,9 @@
+err_dma_buffers:
+ free_dma_tx_desc_resources(priv);
+
return ret;
}
+ return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
@ -3672,78 +3603,119 @@
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
- /* Release the DMA TX/RX socket buffers */
- dma_free_rx_skbufs(priv);
- dma_free_tx_skbufs(priv);
-
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc) {
- dma_free_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- }
- kfree(priv->rx_skbuff_dma);
- kfree(priv->rx_skbuff);
- kfree(priv->tx_skbuff_dma);
- kfree(priv->tx_skbuff);
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA RX socket buffers */
+ free_dma_rx_desc_resources(priv);
+
+ /* Release the DMA TX socket buffers */
+ free_dma_tx_desc_resources(priv);
}
/**
@@ -1256,19 +1640,104 @@ static void free_dma_desc_resources(stru
*/
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
- int rx_count = priv->dma_cap.number_rx_queues;
- int queue = 0;
+}
+
+/**
+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
- /* If GMAC does not have multiple queues, then this is not necessary*/
- if (rx_count == 1)
- return;
- if (priv->tx_skbuff[i]) {
- dev_kfree_skb_any(priv->tx_skbuff[i]);
- priv->tx_skbuff[i] = NULL;
- priv->tx_skbuff_dma[i].buf = 0;
- priv->tx_skbuff_dma[i].map_as_page = false;
- }
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
+ }
+}
}
}
- /**
- * If the core is synthesized with multiple rx queues / multiple
- * dma channels, then rx queues will be disabled by default.
- * For now only rx queue 0 is enabled.
- */
- priv->hw->mac->rx_queue_enable(priv->hw, queue);
+/**
/**
- * alloc_dma_desc_resources - alloc TX/RX resources.
- * @priv: private structure
- * Description: according to which descriptor can be used (extend or basic)
- * this function allocates the resources for TX and RX paths. In case of
- * reception, for example, it pre-allocated the RX socket buffer in order to
- * allow zero-copy mechanism.
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts a RX DMA channel
+ */
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
{
- int ret = -ENOMEM;
-
- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- return -ENOMEM;
-
- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
- sizeof(*priv->tx_skbuff_dma),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
- if (priv->extend_desc) {
- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
-
- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
+ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+ priv->hw->dma->start_rx(priv->ioaddr, chan);
+}
+
- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- goto err_dma;
- }
- }
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
@ -3756,7 +3728,8 @@
+ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+ priv->hw->dma->start_tx(priv->ioaddr, chan);
+}
+
- return 0;
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
@ -3769,7 +3742,16 @@
+ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_rx(priv->ioaddr, chan);
+}
+
-err_dma:
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
- return ret;
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
@ -3781,8 +3763,9 @@
+{
+ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+ priv->hw->dma->stop_tx(priv->ioaddr, chan);
+}
+
}
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
@ -3790,7 +3773,31 @@
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
+{
{
- /* Release the DMA TX/RX socket buffers */
- dma_free_rx_skbufs(priv);
- dma_free_tx_skbufs(priv);
-
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc) {
- dma_free_coherent(priv->device,
- DMA_TX_SIZE * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device, DMA_TX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, DMA_RX_SIZE *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- }
- kfree(priv->rx_skbuff_dma);
- kfree(priv->rx_skbuff);
- kfree(priv->tx_skbuff_dma);
- kfree(priv->tx_skbuff);
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
@ -3800,23 +3807,38 @@
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_start_tx_dma(priv, chan);
+}
+
+/**
}
/**
- * stmmac_mac_enable_rx_queues - Enable MAC rx queues
- * @priv: driver private structure
- * Description: It is used for enabling the rx queues in the MAC
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
+ */
*/
-static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+{
{
- int rx_count = priv->dma_cap.number_rx_queues;
- int queue = 0;
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
- /* If GMAC does not have multiple queues, then this is not necessary*/
- if (rx_count == 1)
- return;
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_stop_rx_dma(priv, chan);
+
- /**
- * If the core is synthesized with multiple rx queues / multiple
- * dma channels, then rx queues will be disabled by default.
- * For now only rx queue 0 is enabled.
- */
- priv->hw->mac->rx_queue_enable(priv->hw, queue);
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_stop_tx_dma(priv, chan);
}

@@ -1,7 +1,5 @@
-Index: linux-4.9.51/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
-===================================================================
---- linux-4.9.51.orig/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
-+++ linux-4.9.51/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
+--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
++++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
@@ -53,6 +53,7 @@
aliases {

@@ -7,10 +7,8 @@ Subject: [PATCH] ARM: dts: sun8i: nanopi-neo: enable UART, USB and I2C pins
arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts | 43 ++++++++++++++++++++++++++++---
1 file changed, 40 insertions(+), 3 deletions(-)
-Index: linux-4.9.51/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
-===================================================================
---- linux-4.9.51.orig/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
-+++ linux-4.9.51/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
+--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
++++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
@@ -78,10 +78,30 @@
};
};
