From f339945a8e81fff22df95284e142b79c37fd2333 Mon Sep 17 00:00:00 2001
From: Yangbo Lu
Date: Thu, 5 Jul 2018 16:07:09 +0800
Subject: [PATCH 02/32] core-linux: support layerscape

This is an integrated patch for layerscape core-linux support.

Signed-off-by: Madalin Bucur
Signed-off-by: Zhao Qiang
Signed-off-by: Camelia Groza
Signed-off-by: Madalin Bucur
Signed-off-by: Zhang Ying-22455
Signed-off-by: Ramneek Mehresh
Signed-off-by: Jarod Wilson
Signed-off-by: Nikhil Badola
Signed-off-by: stephen hemminger
Signed-off-by: Arnd Bergmann
Signed-off-by: Yangbo Lu
---
 drivers/base/devres.c                       |  66 ++++++
 drivers/base/soc.c                          |  70 ++++++
 .../net/ethernet/mellanox/mlxsw/spectrum.c  |   2 +-
 .../mellanox/mlxsw/spectrum_switchdev.c     |   2 +-
 drivers/net/ethernet/rocker/rocker_ofdpa.c  |   4 +-
 include/linux/device.h                      |  19 ++
 include/linux/dma-mapping.h                 |   5 +
 include/linux/fsl/svr.h                     |  97 ++++++++
 include/linux/fsl_devices.h                 |   3 +
 include/linux/irqdesc.h                     |   4 +
 include/linux/irqdomain.h                   |  13 +-
 include/linux/netdev_features.h             |   2 +
 include/linux/netdevice.h                   |  10 +-
 include/linux/skbuff.h                      |   2 +
 include/linux/sys_soc.h                     |   3 +
 include/net/switchdev.h                     |   8 +-
 include/uapi/linux/if_ether.h               |   1 +
 kernel/irq/Kconfig                          |  11 +
 kernel/irq/Makefile                         |   1 +
 kernel/irq/debugfs.c                        | 215 ++++++++++++++++++
 kernel/irq/internals.h                      |  24 ++
 kernel/irq/irqdesc.c                        |   1 +
 kernel/irq/irqdomain.c                      | 171 ++++++++++----
 kernel/irq/manage.c                         |   1 +
 kernel/irq/msi.c                            |   2 +-
 net/bridge/br.c                             |   4 +-
 net/bridge/br_fdb.c                         |   2 +
 net/bridge/br_private.h                     |   7 +
 net/bridge/br_switchdev.c                   |  33 +++
 net/core/dev.c                              |  30 ++-
 net/core/net-sysfs.c                        |  20 +-
 net/core/rtnetlink.c                        |   4 +-
 net/core/skbuff.c                           |  29 ++-
 net/sched/sch_generic.c                     |   7 +
 34 files changed, 811 insertions(+), 62 deletions(-)
 create mode 100644 include/linux/fsl/svr.h
 create mode 100644 kernel/irq/debugfs.c

--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/percpu.h>
 
 #include "base.h"
 
@@ -985,3 +986,66 @@ void devm_free_pages(struct device *dev,
 			       &devres));
 }
 EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+	void __percpu *p;
+
+	p = *(void __percpu **)pdata;
+	free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+	struct devres *devr = container_of(data, struct devres, data);
+
+	return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+				   size_t align)
+{
+	void *p;
+	void __percpu *pcpu;
+
+	pcpu = __alloc_percpu(size, align);
+	if (!pcpu)
+		return NULL;
+
+	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+	if (!p) {
+		free_percpu(pcpu);
+		return NULL;
+	}
+
+	*(void __percpu **)p = pcpu;
+
+	devres_add(dev, p);
+
+	return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+			       (void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
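A minimal usage sketch for the devres helpers above — the structure and
probe function are hypothetical driver code, not part of the diff:

	#include <linux/device.h>
	#include <linux/platform_device.h>

	/* Hypothetical per-CPU counters tied to the device's lifetime. */
	struct example_stats {
		u64 rx_packets;
		u64 rx_bytes;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_stats __percpu *stats;

		stats = devm_alloc_percpu(&pdev->dev, struct example_stats);
		if (!stats)
			return -ENOMEM;

		/* No explicit free_percpu(): devres releases it on detach. */
		return 0;
	}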
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/sys_soc.h>
 #include <linux/err.h>
+#include <linux/glob.h>
 
 static DEFINE_IDA(soc_ida);
 
@@ -159,3 +160,72 @@ static int __init soc_bus_register(void)
 	return bus_register(&soc_bus_type);
 }
 core_initcall(soc_bus_register);
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+	struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+	const struct soc_device_attribute *match = arg;
+
+	if (match->machine &&
+	    (!soc_dev->attr->machine ||
+	     !glob_match(match->machine, soc_dev->attr->machine)))
+		return 0;
+
+	if (match->family &&
+	    (!soc_dev->attr->family ||
+	     !glob_match(match->family, soc_dev->attr->family)))
+		return 0;
+
+	if (match->revision &&
+	    (!soc_dev->attr->revision ||
+	     !glob_match(match->revision, soc_dev->attr->revision)))
+		return 0;
+
+	if (match->soc_id &&
+	    (!soc_dev->attr->soc_id ||
+	     !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+	const struct soc_device_attribute *matches)
+{
+	int ret = 0;
+
+	if (!matches)
+		return NULL;
+
+	while (!ret) {
+		if (!(matches->machine || matches->family ||
+		      matches->revision || matches->soc_id))
+			break;
+		ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+				       soc_device_match_one);
+		if (!ret)
+			matches++;
+		else
+			return matches;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
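A sketch of the intended soc_device_match() calling convention — the
table below is illustrative (the QorIQ strings are assumptions, not
taken from this patch); note the zero-filled sentinel terminating it:

	static const struct soc_device_attribute example_soc_quirks[] = {
		/* .data can carry a quirk descriptor for the matched SoC */
		{ .family = "QorIQ LS1043A", .revision = "1.0", .data = (void *)1 },
		{ /* sentinel */ }
	};

	static void example_apply_soc_quirks(void)
	{
		const struct soc_device_attribute *attr;

		attr = soc_device_match(example_soc_quirks);
		if (attr && attr->data)
			pr_info("applying SoC revision specific quirk\n");
	}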
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -859,7 +859,7 @@ mlxsw_sp_port_get_sw_stats64(const struc
 	return 0;
 }
 
-static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
 {
 	switch (attr_id) {
 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1405,7 +1405,7 @@ static void mlxsw_sp_fdb_call_notifiers(
 	if (learning_sync) {
 		info.addr = mac;
 		info.vid = vid;
-		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+		notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
 		call_switchdev_notifiers(notifier_type, dev, &info.info);
 	}
 }
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1939,10 +1939,10 @@ static void ofdpa_port_fdb_learn_work(st
 	rtnl_lock();
 	if (learned && removing)
-		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
 					 lw->ofdpa_port->dev, &info.info);
 	else if (learned && !removing)
-		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
 					 lw->ofdpa_port->dev, &info.info);
 	rtnl_unlock();
 
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -688,6 +688,25 @@ void __iomem *devm_ioremap_resource(stru
 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
 void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
 
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
+						      __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+				   size_t align);
+void devm_free_percpu(struct device *dev, void __percpu *pdata);
+
 static inline int devm_add_action_or_reset(struct device *dev,
 					   void (*action)(void *), void *data)
 {
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -164,6 +164,11 @@ int dma_mmap_from_coherent(struct device
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
+static inline void set_dma_ops(struct device *dev,
+			       struct dma_map_ops *dma_ops)
+{
+	dev->archdata.dma_ops = dma_ops;
+}
 #else
 /*
  * Define the dma api to allow compilation but not linking of
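The new set_dma_ops() helper is meant for bus code that must install
custom DMA operations on child devices. A minimal sketch, assuming a
custom my_dma_ops table that is not defined in this patch:

	extern struct dma_map_ops my_dma_ops;

	static int example_bus_notifier(struct notifier_block *nb,
					unsigned long action, void *data)
	{
		struct device *dev = data;

		/* Install our DMA ops as soon as a device joins the bus. */
		if (action == BUS_NOTIFY_ADD_DEVICE)
			set_dma_ops(dev, &my_dma_ops);

		return NOTIFY_DONE;
	}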
--- /dev/null
+++ b/include/linux/fsl/svr.h
@@ -0,0 +1,97 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef FSL_SVR_H
+#define FSL_SVR_H
+
+#define SVR_REV(svr)	((svr) & 0xFF)		/* SOC design revision */
+#define SVR_MAJ(svr)	(((svr) >> 4) & 0xF)	/* Major revision field */
+#define SVR_MIN(svr)	(((svr) >> 0) & 0xF)	/* Minor revision field */
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+
+#define SVR_8533	0x803400
+#define SVR_8535	0x803701
+#define SVR_8536	0x803700
+#define SVR_8540	0x803000
+#define SVR_8541	0x807200
+#define SVR_8543	0x803200
+#define SVR_8544	0x803401
+#define SVR_8545	0x803102
+#define SVR_8547	0x803101
+#define SVR_8548	0x803100
+#define SVR_8555	0x807100
+#define SVR_8560	0x807000
+#define SVR_8567	0x807501
+#define SVR_8568	0x807500
+#define SVR_8569	0x808000
+#define SVR_8572	0x80E000
+#define SVR_P1010	0x80F100
+#define SVR_P1011	0x80E500
+#define SVR_P1012	0x80E501
+#define SVR_P1013	0x80E700
+#define SVR_P1014	0x80F101
+#define SVR_P1017	0x80F700
+#define SVR_P1020	0x80E400
+#define SVR_P1021	0x80E401
+#define SVR_P1022	0x80E600
+#define SVR_P1023	0x80F600
+#define SVR_P1024	0x80E402
+#define SVR_P1025	0x80E403
+#define SVR_P2010	0x80E300
+#define SVR_P2020	0x80E200
+#define SVR_P2040	0x821000
+#define SVR_P2041	0x821001
+#define SVR_P3041	0x821103
+#define SVR_P4040	0x820100
+#define SVR_P4080	0x820000
+#define SVR_P5010	0x822100
+#define SVR_P5020	0x822000
+#define SVR_P5021	0x820500
+#define SVR_P5040	0x820400
+#define SVR_T4240	0x824000
+#define SVR_T4120	0x824001
+#define SVR_T4160	0x824100
+#define SVR_T4080	0x824102
+#define SVR_C291	0x850000
+#define SVR_C292	0x850020
+#define SVR_C293	0x850030
+#define SVR_B4860	0x868000
+#define SVR_G4860	0x868001
+#define SVR_G4060	0x868003
+#define SVR_B4440	0x868100
+#define SVR_G4440	0x868101
+#define SVR_B4420	0x868102
+#define SVR_B4220	0x868103
+#define SVR_T1040	0x852000
+#define SVR_T1041	0x852001
+#define SVR_T1042	0x852002
+#define SVR_T1020	0x852100
+#define SVR_T1021	0x852101
+#define SVR_T1022	0x852102
+#define SVR_T1023	0x854100
+#define SVR_T1024	0x854000
+#define SVR_T2080	0x853000
+#define SVR_T2081	0x853100
+
+#define SVR_8610	0x80A000
+#define SVR_8641	0x809000
+#define SVR_8641D	0x809001
+
+#define SVR_9130	0x860001
+#define SVR_9131	0x860000
+#define SVR_9132	0x861000
+#define SVR_9232	0x861400
+
+#define SVR_Unknown	0xFFFFFF
+
+#endif
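How the SVR macros above are expected to be used — the raw value in the
comment is illustrative, not read from real hardware:

	#include <linux/fsl/svr.h>

	static void example_report_soc(u32 svr)
	{
		/* e.g. 0x85200020 decodes as a T1040, silicon rev 2.0 */
		if (SVR_SOC_VER(svr) == SVR_T1040)
			pr_info("T1040 rev %d.%d\n",
				SVR_MAJ(svr), SVR_MIN(svr));
	}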
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -99,7 +99,10 @@ struct fsl_usb2_platform_data {
 	unsigned	suspended:1;
 	unsigned	already_suspended:1;
 	unsigned	has_fsl_erratum_a007792:1;
+	unsigned	has_fsl_erratum_14:1;
 	unsigned	has_fsl_erratum_a005275:1;
+	unsigned	has_fsl_erratum_a006918:1;
+	unsigned	has_fsl_erratum_a005697:1;
 	unsigned	check_phy_clk_valid:1;
 
 	/* register save area for suspend/resume */
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -46,6 +46,7 @@ struct pt_regs;
 * @rcu:		rcu head for delayed free
 * @kobj:		kobject used to represent this struct in sysfs
 * @dir:		/proc/irq/ procfs entry
+ * @debugfs_file:	dentry for the debugfs file
 * @name:		flow handler name for /proc/interrupts output
 */
 struct irq_desc {
@@ -88,6 +89,9 @@ struct irq_desc {
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
 #endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+	struct dentry		*debugfs_file;
+#endif
 #ifdef CONFIG_SPARSE_IRQ
 	struct rcu_head		rcu;
 	struct kobject		kobj;
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -138,6 +138,7 @@ struct irq_domain_chip_generic;
 *      setting up one or more generic chips for interrupt controllers
 *      drivers using the generic chip library which uses this pointer.
 * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @debugfs_file: dentry for the domain debugfs file
 *
 * Revmap data, used internally by irq_domain
 * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -160,6 +161,9 @@ struct irq_domain {
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 	struct irq_domain *parent;
 #endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+	struct dentry		*debugfs_file;
+#endif
 
 	/* reverse map data. The linear map gets appended to the irq_domain */
 	irq_hw_number_t hwirq_max;
@@ -174,8 +178,8 @@ enum {
 	/* Irq domain is hierarchical */
 	IRQ_DOMAIN_FLAG_HIERARCHY	= (1 << 0),
 
-	/* Core calls alloc/free recursive through the domain hierarchy. */
-	IRQ_DOMAIN_FLAG_AUTO_RECURSIVE	= (1 << 1),
+	/* Irq domain name was allocated in __irq_domain_add() */
+	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 6),
 
 	/* Irq domain is an IPI domain with virq per cpu */
 	IRQ_DOMAIN_FLAG_IPI_PER_CPU	= (1 << 2),
@@ -231,6 +235,9 @@ static inline bool is_fwnode_irqchip(str
 	return fwnode && fwnode->type == FWNODE_IRQCHIP;
 }
 
+extern void irq_domain_update_bus_token(struct irq_domain *domain,
+					enum irq_domain_bus_token bus_token);
+
 static inline
 struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
 					    enum irq_domain_bus_token bus_token)
@@ -403,7 +410,7 @@ static inline int irq_domain_alloc_irqs(
 							 NULL);
 }
 
-extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs, void *arg);
 extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -74,6 +74,7 @@ enum {
 	NETIF_F_BUSY_POLL_BIT,		/* Busy poll */
 	NETIF_F_HW_TC_BIT,		/* Offload TC infrastructure */
+	NETIF_F_HW_ACCEL_MQ_BIT,	/* Hardware-accelerated multiqueue */
 
 	/*
 	 * Add your fresh new feature above and remember to update
@@ -136,6 +137,7 @@ enum {
 #define NETIF_F_HW_L2FW_DOFFLOAD	__NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_BUSY_POLL	__NETIF_F(BUSY_POLL)
 #define NETIF_F_HW_TC		__NETIF_F(HW_TC)
+#define NETIF_F_HW_ACCEL_MQ	__NETIF_F(HW_ACCEL_MQ)
 
 #define for_each_netdev_feature(mask_addr, bit)	\
 	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -930,7 +930,7 @@ struct netdev_xdp {
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
- * bool (*ndo_has_offload_stats)(int attr_id)
+ * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
@@ -1167,7 +1167,7 @@ struct net_device_ops {
 	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 						     struct rtnl_link_stats64 *storage);
-	bool			(*ndo_has_offload_stats)(int attr_id);
+	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
 	int			(*ndo_get_offload_stats)(int attr_id,
 							 const struct net_device *dev,
 							 void *attr_data);
@@ -1509,6 +1509,8 @@ enum netdev_priv_flags {
 *	@if_port:	Selectable AUI, TP, ...
 *	@dma:		DMA channel
 *	@mtu:		Interface MTU value
+ *	@min_mtu:	Interface Minimum MTU value
+ *	@max_mtu:	Interface Maximum MTU value
 *	@type:		Interface hardware type
 *	@hard_header_len: Maximum hardware header length.
 *	@min_header_len: Minimum hardware header length
@@ -1735,6 +1737,8 @@ struct net_device {
 	unsigned char		dma;
 
 	unsigned int		mtu;
+	unsigned int		min_mtu;
+	unsigned int		max_mtu;
 	unsigned short		type;
 	unsigned short		hard_header_len;
 	unsigned short		min_header_len;
@@ -1938,6 +1942,8 @@ int netdev_set_prio_tc_map(struct net_de
 	return 0;
 }
 
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
+
 static inline
 void netdev_reset_tc(struct net_device *dev)
 {
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -908,6 +908,7 @@ void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_tx_error(struct sk_buff *skb);
 void consume_skb(struct sk_buff *skb);
+void skb_recycle(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 
 extern struct kmem_cache *skbuff_head_cache;
@@ -3081,6 +3082,7 @@ static inline void skb_free_datagram_loc
 }
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
 			      int len, __wsum csum);
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -13,6 +13,7 @@ struct soc_device_attribute {
 	const char *family;
 	const char *revision;
 	const char *soc_id;
+	const void *data;
 };
 
 /**
@@ -34,4 +35,6 @@ void soc_device_unregister(struct soc_de
 */
 struct device *soc_device_to_device(struct soc_device *soc);
 
+const struct soc_device_attribute *soc_device_match(
+	const struct soc_device_attribute *matches);
 #endif /* __SOC_BUS_H */
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -46,6 +46,7 @@ enum switchdev_attr_id {
 	SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 	SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 	SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+	SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
 	SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
 	SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 };
@@ -60,6 +61,7 @@ struct switchdev_attr {
 		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
 		u8 stp_state;				/* PORT_STP_STATE */
 		unsigned long brport_flags;		/* PORT_BRIDGE_FLAGS */
+		unsigned long brport_flags_support;	/* PORT_BRIDGE_FLAGS_SUPPORT */
 		clock_t ageing_time;			/* BRIDGE_AGEING_TIME */
 		bool vlan_filtering;			/* BRIDGE_VLAN_FILTERING */
 	} u;
@@ -149,8 +151,10 @@ struct switchdev_ops {
 };
 
 enum switchdev_notifier_type {
-	SWITCHDEV_FDB_ADD = 1,
-	SWITCHDEV_FDB_DEL,
+	SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
+	SWITCHDEV_FDB_DEL_TO_BRIDGE,
+	SWITCHDEV_FDB_ADD_TO_DEVICE,
+	SWITCHDEV_FDB_DEL_TO_DEVICE,
 };
 
 struct switchdev_notifier_info {
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -36,6 +36,7 @@
 #define ETH_DATA_LEN	1500	/* Max. octets in payload */
 #define ETH_FRAME_LEN	1514	/* Max. octets in frame sans FCS */
 #define ETH_FCS_LEN	4	/* Octets in the FCS */
+#define ETH_MIN_MTU	68	/* Min IPv4 MTU per RFC791 */
 
 /*
 *	These are the defined Ethernet Protocol ID's.
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -108,4 +108,15 @@ config SPARSE_IRQ
 
 	  If you don't know what to do here, say N.
 
+config GENERIC_IRQ_DEBUGFS
+	bool "Expose irq internals in debugfs"
+	depends on DEBUG_FS
+	default n
+	---help---
+
+	  Exposes internal state information through debugfs. Mostly for
+	  developers and debugging of hard to diagnose interrupt problems.
+
+	  If you don't know what to do here, say N.
+
 endmenu
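Out-of-tree drivers implementing the offload-stats callbacks need the
same conversion as the mlxsw and rocker changes above. A sketch of the
new two-argument signature, for a hypothetical driver:

	static bool example_has_offload_stats(const struct net_device *dev,
					      int attr_id)
	{
		switch (attr_id) {
		case IFLA_OFFLOAD_XSTATS_CPU_HIT:
			return true;
		default:
			return false;
		}
	}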
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
 obj-$(CONFIG_SMP) += affinity.o
+obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
--- /dev/null
+++ b/kernel/irq/debugfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2017 Thomas Gleixner
+ *
+ * This file is licensed under the GPL V2.
+ */
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+static struct dentry *irq_dir;
+
+struct irq_bit_descr {
+	unsigned int	mask;
+	char		*name;
+};
+#define BIT_MASK_DESCR(m)	{ .mask = m, .name = #m }
+
+static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
+				const struct irq_bit_descr *sd, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++, sd++) {
+		if (state & sd->mask)
+			seq_printf(m, "%*s%s\n", ind + 12, "", sd->name);
+	}
+}
+
+#ifdef CONFIG_SMP
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	struct cpumask *msk;
+
+	msk = irq_data_get_affinity_mask(data);
+	seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	msk = desc->pending_mask;
+	seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(msk));
+#endif
+}
+#else
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { }
+#endif
+
+static const struct irq_bit_descr irqchip_flags[] = {
+	BIT_MASK_DESCR(IRQCHIP_SET_TYPE_MASKED),
+	BIT_MASK_DESCR(IRQCHIP_EOI_IF_HANDLED),
+	BIT_MASK_DESCR(IRQCHIP_MASK_ON_SUSPEND),
+	BIT_MASK_DESCR(IRQCHIP_ONOFFLINE_ENABLED),
+	BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
+	BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
+	BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+};
+
+static void
+irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
+{
+	struct irq_chip *chip = data->chip;
+
+	if (!chip) {
+		seq_printf(m, "chip: None\n");
+		return;
+	}
+	seq_printf(m, "%*schip: %s\n", ind, "", chip->name);
+	seq_printf(m, "%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
+	irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
+			    ARRAY_SIZE(irqchip_flags));
+}
+static void
+irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
+{
+	seq_printf(m, "%*sdomain: %s\n", ind, "",
+		   data->domain ? data->domain->name : "");
+	seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
+	irq_debug_show_chip(m, data, ind + 1);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+	if (!data->parent_data)
+		return;
+	seq_printf(m, "%*sparent:\n", ind + 1, "");
+	irq_debug_show_data(m, data->parent_data, ind + 4);
+#endif
+}
+
+static const struct irq_bit_descr irqdata_states[] = {
+	BIT_MASK_DESCR(IRQ_TYPE_EDGE_RISING),
+	BIT_MASK_DESCR(IRQ_TYPE_EDGE_FALLING),
+	BIT_MASK_DESCR(IRQ_TYPE_LEVEL_HIGH),
+	BIT_MASK_DESCR(IRQ_TYPE_LEVEL_LOW),
+	BIT_MASK_DESCR(IRQD_LEVEL),
+
+	BIT_MASK_DESCR(IRQD_ACTIVATED),
+	BIT_MASK_DESCR(IRQD_IRQ_STARTED),
+	BIT_MASK_DESCR(IRQD_IRQ_DISABLED),
+	BIT_MASK_DESCR(IRQD_IRQ_MASKED),
+	BIT_MASK_DESCR(IRQD_IRQ_INPROGRESS),
+
+	BIT_MASK_DESCR(IRQD_PER_CPU),
+	BIT_MASK_DESCR(IRQD_NO_BALANCING),
+
+	BIT_MASK_DESCR(IRQD_MOVE_PCNTXT),
+	BIT_MASK_DESCR(IRQD_AFFINITY_SET),
+	BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
+	BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
+	BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+
+	BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+	BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
+	BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
+};
+
+static const struct irq_bit_descr irqdesc_states[] = {
+	BIT_MASK_DESCR(_IRQ_NOPROBE),
+	BIT_MASK_DESCR(_IRQ_NOREQUEST),
+	BIT_MASK_DESCR(_IRQ_NOTHREAD),
+	BIT_MASK_DESCR(_IRQ_NOAUTOEN),
+	BIT_MASK_DESCR(_IRQ_NESTED_THREAD),
+	BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
+	BIT_MASK_DESCR(_IRQ_IS_POLLED),
+	BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
+};
+
+static const struct irq_bit_descr irqdesc_istates[] = {
+	BIT_MASK_DESCR(IRQS_AUTODETECT),
+	BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
+	BIT_MASK_DESCR(IRQS_POLL_INPROGRESS),
+	BIT_MASK_DESCR(IRQS_ONESHOT),
+	BIT_MASK_DESCR(IRQS_REPLAY),
+	BIT_MASK_DESCR(IRQS_WAITING),
+	BIT_MASK_DESCR(IRQS_PENDING),
+	BIT_MASK_DESCR(IRQS_SUSPENDED),
+};
+
+
+static int irq_debug_show(struct seq_file *m, void *p)
+{
+	struct irq_desc *desc = m->private;
+	struct irq_data *data;
+
+	raw_spin_lock_irq(&desc->lock);
+	data = irq_desc_get_irq_data(desc);
+	seq_printf(m, "handler: %pf\n", desc->handle_irq);
+	seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
+	irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
+			    ARRAY_SIZE(irqdesc_states));
+	seq_printf(m, "istate: 0x%08x\n", desc->istate);
+	irq_debug_show_bits(m, 0, desc->istate, irqdesc_istates,
+			    ARRAY_SIZE(irqdesc_istates));
+	seq_printf(m, "ddepth: %u\n", desc->depth);
+	seq_printf(m, "wdepth: %u\n", desc->wake_depth);
+	seq_printf(m, "dstate: 0x%08x\n", irqd_get(data));
+	irq_debug_show_bits(m, 0, irqd_get(data), irqdata_states,
+			    ARRAY_SIZE(irqdata_states));
+	seq_printf(m, "node: %d\n", irq_data_get_node(data));
+	irq_debug_show_masks(m, desc);
+	irq_debug_show_data(m, data, 0);
+	raw_spin_unlock_irq(&desc->lock);
+	return 0;
+}
+
+static int irq_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_irq_ops = {
+	.open		= irq_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
+{
+	char name[10];
+
+	if (!irq_dir || !desc || desc->debugfs_file)
+		return;
+
+	sprintf(name, "%d", irq);
+	desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc,
+						 &dfs_irq_ops);
+}
+
+void irq_remove_debugfs_entry(struct irq_desc *desc)
+{
+	if (desc->debugfs_file)
+		debugfs_remove(desc->debugfs_file);
+}
+static int __init irq_debugfs_init(void)
+{
+	struct dentry *root_dir;
+	int irq;
+
+	root_dir = debugfs_create_dir("irq", NULL);
+	if (!root_dir)
+		return -ENOMEM;
+
+	irq_domain_debugfs_init(root_dir);
+
+	irq_dir = debugfs_create_dir("irqs", root_dir);
+
+	irq_lock_sparse();
+	for_each_active_irq(irq)
+		irq_add_debugfs_entry(irq, irq_to_desc(irq));
+	irq_unlock_sparse();
+
+	return 0;
+}
+__initcall(irq_debugfs_init);
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -169,6 +169,11 @@ irq_put_desc_unlock(struct irq_desc *des
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
 
+static inline unsigned int irqd_get(struct irq_data *d)
+{
+	return __irqd_to_state(d);
+}
+
 /*
 * Manipulation functions for irq_data.state
 */
@@ -226,3 +231,22 @@ irq_pm_install_action(struct irq_desc *d
 static inline void
 irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
 #endif
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
+void irq_remove_debugfs_entry(struct irq_desc *desc);
+# ifdef CONFIG_IRQ_DOMAIN
+void irq_domain_debugfs_init(struct dentry *root);
+# else
+static inline void irq_domain_debugfs_init(struct dentry *root)
+{
+}
+# endif
+#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
+static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
+{
+}
+static inline void irq_remove_debugfs_entry(struct irq_desc *d)
+{
+}
+#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -394,6 +394,7 @@ static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	irq_remove_debugfs_entry(desc);
 	unregister_irq_proc(irq, desc);
 
 	/*
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -31,6 +31,14 @@ struct irqchip_fwid {
 	void *data;
 };
 
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static void debugfs_add_domain_dir(struct irq_domain *d);
+static void debugfs_remove_domain_dir(struct irq_domain *d);
+#else
+static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
+static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
+#endif
+
 /**
 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
 *                           identifying an irq domain
@@ -117,6 +125,7 @@ struct irq_domain *__irq_domain_add(stru
 	irq_domain_check_hierarchy(domain);
 
 	mutex_lock(&irq_domain_mutex);
+	debugfs_add_domain_dir(domain);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
 
@@ -136,6 +145,7 @@ EXPORT_SYMBOL_GPL(__irq_domain_add);
 void irq_domain_remove(struct irq_domain *domain)
 {
 	mutex_lock(&irq_domain_mutex);
+	debugfs_remove_domain_dir(domain);
 
 	WARN_ON(!radix_tree_empty(&domain->revmap_tree));
 
@@ -156,6 +166,37 @@ void irq_domain_remove(struct irq_domain
 }
 EXPORT_SYMBOL_GPL(irq_domain_remove);
 
+void irq_domain_update_bus_token(struct irq_domain *domain,
+				 enum irq_domain_bus_token bus_token)
+{
+	char *name;
+
+	if (domain->bus_token == bus_token)
+		return;
+
+	mutex_lock(&irq_domain_mutex);
+
+	domain->bus_token = bus_token;
+
+	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
+	if (!name) {
+		mutex_unlock(&irq_domain_mutex);
+		return;
+	}
+
+	debugfs_remove_domain_dir(domain);
+
+	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
+		kfree(domain->name);
+	else
+		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+
+	domain->name = name;
+	debugfs_add_domain_dir(domain);
+
+	mutex_unlock(&irq_domain_mutex);
+}
+
 /**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
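A sketch of how an irqchip driver would use the new
irq_domain_update_bus_token() helper after creating a domain — the
fwnode, domain size and ops below are placeholders:

	static struct irq_domain *
	example_create_msi_parent(struct fwnode_handle *fwnode,
				  const struct irq_domain_ops *ops)
	{
		struct irq_domain *d;

		d = irq_domain_create_linear(fwnode, 64, ops, NULL);
		if (d)
			/* Retag the domain; also renames its debugfs dir. */
			irq_domain_update_bus_token(d, DOMAIN_BUS_NEXUS);

		return d;
	}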
@@ -1164,43 +1205,18 @@ void irq_domain_free_irqs_top(struct irq
 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 }
 
-static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
-{
-	return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
-}
-
-static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
+static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs)
 {
 	domain->ops->free(domain, irq_base, nr_irqs);
-	if (irq_domain_is_auto_recursive(domain)) {
-		BUG_ON(!domain->parent);
-		irq_domain_free_irqs_recursive(domain->parent, irq_base,
-					       nr_irqs);
-	}
 }
 
-int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
 				    unsigned int irq_base,
 				    unsigned int nr_irqs, void *arg)
 {
-	int ret = 0;
-	struct irq_domain *parent = domain->parent;
-	bool recursive = irq_domain_is_auto_recursive(domain);
-
-	BUG_ON(recursive && !parent);
-	if (recursive)
-		ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
-						      nr_irqs, arg);
-	if (ret < 0)
-		return ret;
-
-	ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
-	if (ret < 0 && recursive)
-		irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
-
-	return ret;
+	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
 }
 
 /**
@@ -1261,7 +1277,7 @@ int __irq_domain_alloc_irqs(struct irq_d
 	}
 
 	mutex_lock(&irq_domain_mutex);
-	ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
+	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
 	if (ret < 0) {
 		mutex_unlock(&irq_domain_mutex);
 		goto out_free_irq_data;
@@ -1296,7 +1312,7 @@ void irq_domain_free_irqs(unsigned int v
 	mutex_lock(&irq_domain_mutex);
 	for (i = 0; i < nr_irqs; i++)
 		irq_domain_remove_irq(virq + i);
-	irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
+	irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
 	mutex_unlock(&irq_domain_mutex);
 
 	irq_domain_free_irq_data(virq, nr_irqs);
@@ -1316,15 +1332,11 @@ int irq_domain_alloc_irqs_parent(struct
 				 unsigned int irq_base,
 				 unsigned int nr_irqs, void *arg)
 {
-	/* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
-	if (irq_domain_is_auto_recursive(domain))
-		return 0;
+	if (!domain->parent)
+		return -ENOSYS;
 
-	domain = domain->parent;
-	if (domain)
-		return irq_domain_alloc_irqs_recursive(domain, irq_base,
-						       nr_irqs, arg);
-	return -ENOSYS;
+	return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
+					       nr_irqs, arg);
 }
 EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
 
@@ -1339,10 +1351,10 @@ EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_
 void irq_domain_free_irqs_parent(struct irq_domain *domain,
 				 unsigned int irq_base, unsigned int nr_irqs)
 {
-	/* irq_domain_free_irqs_recursive() will call parent's free */
-	if (!irq_domain_is_auto_recursive(domain) && domain->parent)
-		irq_domain_free_irqs_recursive(domain->parent, irq_base,
-					       nr_irqs);
+	if (!domain->parent)
+		return;
+
+	irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
@@ -1448,3 +1460,78 @@ static void irq_domain_check_hierarchy(s
 {
 }
 #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static struct dentry *domain_dir;
+
+static void
+irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
+{
+	seq_printf(m, "%*sname: %s\n", ind, "", d->name);
+	seq_printf(m, "%*ssize: %u\n", ind + 1, "",
+		   d->revmap_size + d->revmap_direct_max_irq);
+	seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
+	seq_printf(m, "%*sflags: 0x%08x\n", ind + 1, "", d->flags);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+	if (!d->parent)
+		return;
+	seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
+	irq_domain_debug_show_one(m, d->parent, ind + 4);
+#endif
+}
+
+static int irq_domain_debug_show(struct seq_file *m, void *p)
+{
+	struct irq_domain *d = m->private;
+
+	/* Default domain? Might be NULL */
+	if (!d) {
+		if (!irq_default_domain)
+			return 0;
+		d = irq_default_domain;
+	}
+	irq_domain_debug_show_one(m, d, 0);
+	return 0;
+}
+
+static int irq_domain_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_domain_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_domain_ops = {
+	.open		= irq_domain_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void debugfs_add_domain_dir(struct irq_domain *d)
+{
+	if (!d->name || !domain_dir || d->debugfs_file)
+		return;
+	d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
+					      &dfs_domain_ops);
+}
+
+static void debugfs_remove_domain_dir(struct irq_domain *d)
+{
+	if (d->debugfs_file)
+		debugfs_remove(d->debugfs_file);
+}
+
+void __init irq_domain_debugfs_init(struct dentry *root)
+{
+	struct irq_domain *d;
+
+	domain_dir = debugfs_create_dir("domains", root);
+	if (!domain_dir)
+		return;
+
+	debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(d, &irq_domain_list, link)
+		debugfs_add_domain_dir(d);
+	mutex_unlock(&irq_domain_mutex);
+}
+#endif
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1391,6 +1391,7 @@ __setup_irq(unsigned int irq, struct irq
 		wake_up_process(new->secondary->thread);
 
 	register_irq_proc(irq, desc);
+	irq_add_debugfs_entry(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
 	free_cpumask_var(mask);
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -310,7 +310,7 @@ int msi_domain_populate_irqs(struct irq_
 		ops->set_desc(arg, desc);
 		/* Assumes the domain mutex is held! */
-		ret = irq_domain_alloc_irqs_recursive(domain, virq, 1, arg);
+		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
 		if (ret)
 			break;
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -138,14 +138,14 @@ static int br_switchdev_event(struct not
 	br = p->br;
 
 	switch (event) {
-	case SWITCHDEV_FDB_ADD:
+	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
 		fdb_info = ptr;
 		err = br_fdb_external_learn_add(br, p, fdb_info->addr,
 						fdb_info->vid);
 		if (err)
 			err = notifier_from_errno(err);
 		break;
-	case SWITCHDEV_FDB_DEL:
+	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
 		fdb_info = ptr;
 		err = br_fdb_external_learn_del(br, p, fdb_info->addr,
 						fdb_info->vid);
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -688,6 +688,8 @@ static void fdb_notify(struct net_bridge
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
 
+	br_switchdev_fdb_notify(fdb, type);
+
 	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
 		goto errout;
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -1060,6 +1060,8 @@ void nbp_switchdev_frame_mark(const stru
 			      struct sk_buff *skb);
 bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
 				  const struct sk_buff *skb);
+void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
+			     int type);
 #else
 static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
 {
@@ -1076,6 +1078,11 @@ static inline bool nbp_switchdev_allowed
 {
 	return true;
 }
+
+static inline void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+}
 #endif /* CONFIG_NET_SWITCHDEV */
 
 #endif
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -55,3 +55,36 @@ bool nbp_switchdev_allowed_egress(const
 	return !skb->offload_fwd_mark ||
 	       BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
 }
+
+static void
+br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
+				u16 vid, struct net_device *dev)
+{
+	struct switchdev_notifier_fdb_info info;
+	unsigned long notifier_type;
+
+	info.addr = mac;
+	info.vid = vid;
+	notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
+	call_switchdev_notifiers(notifier_type, dev, &info.info);
+}
+
+void
+br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+	if (!fdb->added_by_user)
+		return;
+
+	switch (type) {
+	case RTM_DELNEIGH:
+		br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
+						fdb->vlan_id,
+						fdb->dst->dev);
+		break;
+	case RTM_NEWNEIGH:
+		br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
+						fdb->vlan_id,
+						fdb->dst->dev);
+		break;
+	}
+}
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1990,6 +1990,23 @@ static void netif_setup_tc(struct net_de
 	}
 }
 
+int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
+{
+	if (dev->num_tc) {
+		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+		int i;
+
+		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
+			if ((txq - tc->offset) < tc->count)
+				return i;
+		}
+
+		return -1;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_XPS
 static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)		\
@@ -6656,9 +6673,18 @@ int dev_set_mtu(struct net_device *dev,
 	if (new_mtu == dev->mtu)
 		return 0;
 
-	/* MTU must be positive. */
-	if (new_mtu < 0)
+	/* MTU must be positive, and in range */
+	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+		net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
+				    dev->name, new_mtu, dev->min_mtu);
 		return -EINVAL;
+	}
+
+	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+		net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
+				    dev->name, new_mtu, dev->max_mtu);
+		return -EINVAL;
+	}
 
 	if (!netif_device_present(dev))
 		return -ENODEV;
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1021,7 +1021,6 @@ static ssize_t show_trans_timeout(struct
 	return sprintf(buf, "%lu", trans_timeout);
 }
 
-#ifdef CONFIG_XPS
 static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
 {
 	struct net_device *dev = queue->dev;
@@ -1033,6 +1032,21 @@ static unsigned int get_netdev_queue_ind
 	return i;
 }
 
+static ssize_t show_traffic_class(struct netdev_queue *queue,
+				  struct netdev_queue_attribute *attribute,
+				  char *buf)
+{
+	struct net_device *dev = queue->dev;
+	int index = get_netdev_queue_index(queue);
+	int tc = netdev_txq_to_tc(dev, index);
+
+	if (tc < 0)
+		return -EINVAL;
+
+	return sprintf(buf, "%u\n", tc);
+}
+
+#ifdef CONFIG_XPS
 static ssize_t show_tx_maxrate(struct netdev_queue *queue,
 			       struct netdev_queue_attribute *attribute,
 			       char *buf)
@@ -1075,6 +1089,9 @@ static struct netdev_queue_attribute que
 static struct netdev_queue_attribute queue_trans_timeout =
 	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
 
+static struct netdev_queue_attribute queue_traffic_class =
+	__ATTR(traffic_class, S_IRUGO, show_traffic_class, NULL);
+
 #ifdef CONFIG_BQL
 /*
 * Byte queue limits sysfs structures and functions.
@@ -1260,6 +1277,7 @@ static struct netdev_queue_attribute xps
 
 static struct attribute *netdev_queue_default_attrs[] = {
 	&queue_trans_timeout.attr,
+	&queue_traffic_class.attr,
 #ifdef CONFIG_XPS
 	&xps_cpus_attribute.attr,
 	&queue_tx_maxrate.attr,
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3706,7 +3706,7 @@ static int rtnl_get_offload_stats(struct
 		if (!size)
 			continue;
 
-		if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
 			continue;
 
 		attr = nla_reserve_64bit(skb, attr_id, size,
@@ -3747,7 +3747,7 @@ static int rtnl_get_offload_stats_size(c
 	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
 	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
-		if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
 			continue;
 		size = rtnl_get_offload_stats_attr_size(attr_id);
 		nla_size += nla_total_size_64bit(size);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -842,6 +842,32 @@ void napi_consume_skb(struct sk_buff *sk
 }
 EXPORT_SYMBOL(napi_consume_skb);
 
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+	u8 head_frag = skb->head_frag;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb->head_frag = head_frag;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
 /* Make sure a field is enclosed inside headers_start/headers_end section */
 #define CHECK_SKB_FIELD(field) \
 	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
@@ -1075,7 +1101,7 @@ static void skb_headers_offset_update(st
 	skb->inner_mac_header += off;
 }
 
-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	__copy_skb_header(new, old);
 
@@ -1083,6 +1109,7 @@ static void copy_skb_header(struct sk_bu
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 }
+EXPORT_SYMBOL(copy_skb_header);
 
 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 {
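A sketch of the intended skb_recycle() pattern in a driver's cleanup
path — skb_recycle() itself does no ownership or size checks, so they
are the caller's responsibility here (hypothetical helper):

	static struct sk_buff *example_try_reuse_skb(struct sk_buff *skb,
						     unsigned int buf_size)
	{
		/* Only recycle skbs we exclusively own and that still fit. */
		if (skb_shared(skb) || skb_cloned(skb) ||
		    skb_end_offset(skb) < buf_size)
			return NULL;

		skb_recycle(skb);
		return skb;	/* ready for reuse as an RX buffer */
	}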
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -309,6 +309,13 @@ static void dev_watchdog(unsigned long a
 					txq->trans_timeout++;
 					break;
 				}
+
+				/* Devices with NETIF_F_HW_ACCEL_MQ have
+				 * multiple txqs, but only the first one's
+				 * transmission timestamp is updated, so
+				 * avoid checking the rest.
+				 */
+				if (dev->features & NETIF_F_HW_ACCEL_MQ)
+					break;
 			}
 
 			if (some_queue_timedout) {
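With dev_set_mtu() now enforcing the per-device range, drivers opt in
by filling the new net_device fields at setup time. A minimal sketch —
the 9600-byte jumbo limit is illustrative, not from this patch:

	static void example_setup_mtu_range(struct net_device *ndev)
	{
		ndev->min_mtu = ETH_MIN_MTU;	/* 68, per if_ether.h above */
		ndev->max_mtu = 9600;		/* hardware jumbo frame limit */
	}

Leaving max_mtu at 0 keeps the old behaviour, since dev_set_mtu() only
applies the upper bound when dev->max_mtu > 0.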