From 12e9319da1adacac92930c899c99f0e1970cac11 Mon Sep 17 00:00:00 2001
From: Christian Lamparter
Date: Thu, 19 Jan 2017 02:01:31 +0100
Subject: [PATCH 33/38] NET: add qualcomm essedma ethernet driver

Signed-off-by: Christian Lamparter
---
 drivers/net/ethernet/qualcomm/Kconfig  | 9 +++++++++
 drivers/net/ethernet/qualcomm/Makefile | 1 +
 2 files changed, 10 insertions(+)

--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -61,4 +61,13 @@ config QCOM_EMAC
 
 source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
 
+config ESSEDMA
+	tristate "Qualcomm Atheros ESS EDMA support"
+	---help---
+	  This driver supports the Qualcomm Atheros ESS EDMA ethernet adapter.
+	  Say Y to build this driver.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called essedma.
+
 endif # NET_VENDOR_QUALCOMM
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_QCA7000_UART) += qcauart.o
 qcauart-objs := qca_uart.o
 
 obj-y += emac/
+obj-$(CONFIG_ESSEDMA) += essedma/
 
 obj-$(CONFIG_RMNET) += rmnet/
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/essedma/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Qualcomm Atheros ethernet edma driver
+#
+
+
+obj-$(CONFIG_ESSEDMA) += essedma.o
+
+essedma-objs := edma_axi.o edma.o edma_ethtool.o
+
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
@@ -0,0 +1,2143 @@
+/*
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include +#include +#include "ess_edma.h" +#include "edma.h" + +extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED]; +bool edma_stp_rstp; +u16 edma_ath_eth_type; + +/* edma_skb_priority_offset() + * get edma skb priority + */ +static unsigned int edma_skb_priority_offset(struct sk_buff *skb) +{ + return (skb->priority >> 2) & 1; +} + +/* edma_alloc_tx_ring() + * Allocate Tx descriptors ring + */ +static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo, + struct edma_tx_desc_ring *etdr) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + /* Initialize ring */ + etdr->size = sizeof(struct edma_sw_desc) * etdr->count; + etdr->sw_next_to_fill = 0; + etdr->sw_next_to_clean = 0; + + /* Allocate SW descriptors */ + etdr->sw_desc = vzalloc(etdr->size); + if (!etdr->sw_desc) { + dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr); + return -ENOMEM; + } + + /* Allocate HW descriptors */ + etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma, + GFP_KERNEL); + if (!etdr->hw_desc) { + dev_err(&pdev->dev, "descriptor allocation for tx ring failed"); + vfree(etdr->sw_desc); + return -ENOMEM; + } + + return 0; +} + +/* edma_free_tx_ring() + * Free tx rings allocated by edma_alloc_tx_rings + */ +static void edma_free_tx_ring(struct edma_common_info *edma_cinfo, + struct edma_tx_desc_ring *etdr) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + if (likely(etdr->dma)) + dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc, + etdr->dma); + + vfree(etdr->sw_desc); + etdr->sw_desc = NULL; +} + +/* edma_alloc_rx_ring() + * allocate rx descriptor ring + */ +static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo, + struct edma_rfd_desc_ring *erxd) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + erxd->size = sizeof(struct edma_sw_desc) * erxd->count; + erxd->sw_next_to_fill = 0; + erxd->sw_next_to_clean = 0; + + /* Allocate SW descriptors */ + erxd->sw_desc = vzalloc(erxd->size); + if (!erxd->sw_desc) + return -ENOMEM; + + /* Alloc HW descriptors */ + erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma, + GFP_KERNEL); + if (!erxd->hw_desc) { + vfree(erxd->sw_desc); + return -ENOMEM; + } + + return 0; +} + +/* edma_free_rx_ring() + * Free rx ring allocated by alloc_rx_ring + */ +static void edma_free_rx_ring(struct edma_common_info *edma_cinfo, + struct edma_rfd_desc_ring *rxdr) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + if (likely(rxdr->dma)) + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc, + rxdr->dma); + + vfree(rxdr->sw_desc); + rxdr->sw_desc = NULL; +} + +/* edma_configure_tx() + * Configure transmission control data + */ +static void edma_configure_tx(struct edma_common_info *edma_cinfo) +{ + u32 txq_ctrl_data; + + txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT); + txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN; + txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT); + edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data); +} + + +/* edma_configure_rx() + * configure reception control data + */ +static void edma_configure_rx(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + u32 rss_type, rx_desc1, rxq_ctrl_data; + + /* Set RSS type */ + rss_type = hw->rss_type; + edma_write_reg(EDMA_REG_RSS_TYPE, rss_type); + + /* Set RFD burst number */ + rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT); + + /* Set RFD prefetch threshold */ + rx_desc1 |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT); + + /* Set RFD in host 
ring low threshold to generte interrupt */ + rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT); + edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1); + + /* Set Rx FIFO threshold to start to DMA data to host */ + rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE; + + /* Set RX remove vlan bit */ + rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN; + + edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data); +} + +/* edma_alloc_rx_buf() + * does skb allocation for the received packets. + */ +static int edma_alloc_rx_buf(struct edma_common_info + *edma_cinfo, + struct edma_rfd_desc_ring *erdr, + int cleaned_count, int queue_id) +{ + struct platform_device *pdev = edma_cinfo->pdev; + struct edma_rx_free_desc *rx_desc; + struct edma_sw_desc *sw_desc; + struct sk_buff *skb; + unsigned int i; + u16 prod_idx, length; + u32 reg_data; + + if (cleaned_count > erdr->count) { + dev_err(&pdev->dev, "Incorrect cleaned_count %d", + cleaned_count); + return -1; + } + + i = erdr->sw_next_to_fill; + + while (cleaned_count) { + sw_desc = &erdr->sw_desc[i]; + length = edma_cinfo->rx_head_buffer_len; + + if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) { + skb = sw_desc->skb; + } else { + /* alloc skb */ + skb = netdev_alloc_skb(edma_netdev[0], length); + if (!skb) { + /* Better luck next round */ + break; + } + } + + if (edma_cinfo->page_mode) { + struct page *pg = alloc_page(GFP_ATOMIC); + + if (!pg) { + dev_kfree_skb_any(skb); + break; + } + + sw_desc->dma = dma_map_page(&pdev->dev, pg, 0, + edma_cinfo->rx_page_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + sw_desc->dma)) { + __free_page(pg); + dev_kfree_skb_any(skb); + break; + } + + skb_fill_page_desc(skb, 0, pg, 0, + edma_cinfo->rx_page_buffer_len); + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG; + sw_desc->length = edma_cinfo->rx_page_buffer_len; + } else { + sw_desc->dma = dma_map_single(&pdev->dev, skb->data, + length, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + sw_desc->dma)) { + dev_kfree_skb_any(skb); + break; + } + + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD; + sw_desc->length = length; + } + + /* Update the buffer info */ + sw_desc->skb = skb; + rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]); + rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma); + if (++i == erdr->count) + i = 0; + cleaned_count--; + } + + erdr->sw_next_to_fill = i; + + if (i == 0) + prod_idx = erdr->count - 1; + else + prod_idx = i - 1; + + /* Update the producer index */ + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), ®_data); + reg_data &= ~EDMA_RFD_PROD_IDX_BITS; + reg_data |= prod_idx; + edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data); + return cleaned_count; +} + +/* edma_init_desc() + * update descriptor ring size, buffer and producer/consumer index + */ +static void edma_init_desc(struct edma_common_info *edma_cinfo) +{ + struct edma_rfd_desc_ring *rfd_ring; + struct edma_tx_desc_ring *etdr; + int i = 0, j = 0; + u32 data = 0; + u16 hw_cons_idx = 0; + + /* Set the base address of every TPD ring. 
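+	 * Also bring the software fill/clean indices in line with the
+	 * hardware consumer index before the rings are used.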
*/ + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + etdr = edma_cinfo->tpd_ring[i]; + + /* Update descriptor ring base address */ + edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma); + edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data); + + /* Calculate hardware consumer index */ + hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff; + etdr->sw_next_to_fill = hw_cons_idx; + etdr->sw_next_to_clean = hw_cons_idx; + data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT); + data |= hw_cons_idx; + + /* update producer index */ + edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data); + + /* update SW consumer index register */ + edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx); + + /* Set TPD ring size */ + edma_write_reg(EDMA_REG_TPD_RING_SIZE, + edma_cinfo->tx_ring_count & + EDMA_TPD_RING_SIZE_MASK); + } + + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + rfd_ring = edma_cinfo->rfd_ring[j]; + /* Update Receive Free descriptor ring base address */ + edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j), + (u32)(rfd_ring->dma)); + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + + data = edma_cinfo->rx_head_buffer_len; + if (edma_cinfo->page_mode) + data = edma_cinfo->rx_page_buffer_len; + + data &= EDMA_RX_BUF_SIZE_MASK; + data <<= EDMA_RX_BUF_SIZE_SHIFT; + + /* Update RFD ring size and RX buffer size */ + data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK) + << EDMA_RFD_RING_SIZE_SHIFT; + + edma_write_reg(EDMA_REG_RX_DESC0, data); + + /* Disable TX FIFO low watermark and high watermark */ + edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0); + + /* Load all of base address above */ + edma_read_reg(EDMA_REG_TX_SRAM_PART, &data); + data |= 1 << EDMA_LOAD_PTR_SHIFT; + edma_write_reg(EDMA_REG_TX_SRAM_PART, data); +} + +/* edma_receive_checksum + * Api to check checksum on receive packets + */ +static void edma_receive_checksum(struct edma_rx_return_desc *rd, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* check the RRD IP/L4 checksum bit to see if + * its set, which in turn indicates checksum + * failure. 
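+	 * The hardware only reports pass/fail, so on success we can
+	 * mark the skb CHECKSUM_UNNECESSARY but never CHECKSUM_COMPLETE.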
+ */
+	if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK)
+		return;
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* edma_clean_rfd()
+ *	clean up rx resources on error
+ */
+static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index)
+{
+	struct edma_rx_free_desc *rx_desc;
+	struct edma_sw_desc *sw_desc;
+
+	rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]);
+	sw_desc = &erdr->sw_desc[index];
+	if (sw_desc->skb) {
+		dev_kfree_skb_any(sw_desc->skb);
+		sw_desc->skb = NULL;
+	}
+
+	memset(rx_desc, 0, sizeof(struct edma_rx_free_desc));
+}
+
+/* edma_rx_complete_stp_rstp()
+ *	Complete Rx processing for STP/RSTP packets (re-adds the Atheros header)
+ */
+static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd)
+{
+	int i;
+	u32 priority;
+	u16 port_type;
+	u8 mac_addr[EDMA_ETH_HDR_LEN];
+
+	port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT)
+				& EDMA_RRD_PORT_TYPE_MASK;
+	/* if port type is 0x4, then only proceed with
+	 * other stp/rstp calculation
+	 */
+	if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) {
+		u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
+
+		/* calculate the frame priority */
+		priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
+			& EDMA_RRD_PRIORITY_MASK;
+
+		for (i = 0; i < EDMA_ETH_HDR_LEN; i++)
+			mac_addr[i] = skb->data[i];
+
+		/* Check if destination mac addr is bpdu addr */
+		if (!memcmp(mac_addr, bpdu_mac, 6)) {
+			/* destination mac address is BPDU
+			 * destination mac address, then add
+			 * atheros header to the packet.
+			 */
+			u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) |
+				(priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) |
+				(EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id;
+			skb_push(skb, 4);
+			memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN);
+			*(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type);
+			*(uint16_t *)&skb->data[14] = htons(athr_hdr);
+		}
+	}
+}
+
+/*
+ * edma_rx_complete_fraglist()
+ *	Complete Rx processing for fraglist skbs
+ */
+static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean,
+				     u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo)
+{
+	struct platform_device *pdev = edma_cinfo->pdev;
+	struct edma_hw *hw = &edma_cinfo->hw;
+	struct sk_buff *skb_temp;
+	struct sk_buff *skb_prev = NULL;
+	struct edma_sw_desc *sw_desc;
+	int i;
+	u16 size_remaining;
+
+	skb->data_len = 0;
+	skb->tail += (hw->rx_head_buff_size - 16);
+	skb->len = skb->truesize = length;
+	size_remaining = length - (hw->rx_head_buff_size - 16);
+
+	/* clean-up all related sw_descs */
+	for (i = 1; i < num_rfds; i++) {
+		sw_desc = &erdr->sw_desc[sw_next_to_clean];
+		skb_temp = sw_desc->skb;
+
+		dma_unmap_single(&pdev->dev, sw_desc->dma,
+				 sw_desc->length, DMA_FROM_DEVICE);
+
+		if (size_remaining < hw->rx_head_buff_size)
+			skb_put(skb_temp, size_remaining);
+		else
+			skb_put(skb_temp, hw->rx_head_buff_size);
+
+		/*
+		 * If we are processing the first rfd, we link
+		 * skb->frag_list to the skb corresponding to the
+		 * first RFD
+		 */
+		if (i == 1)
+			skb_shinfo(skb)->frag_list = skb_temp;
+		else
+			skb_prev->next = skb_temp;
+		skb_prev = skb_temp;
+		skb_temp->next = NULL;
+
+		skb->data_len += skb_temp->len;
+		size_remaining -= skb_temp->len;
+
+		/* Increment SW index */
+		sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
+		(*cleaned_count)++;
+	}
+
+	return sw_next_to_clean;
+}
+
+/* edma_rx_complete_paged()
+ *	Complete Rx processing for paged skbs
+ */
+static int edma_rx_complete_paged(struct sk_buff *skb, u16
num_rfds, u16 length, u32 sw_next_to_clean, + u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo) +{ + struct platform_device *pdev = edma_cinfo->pdev; + struct sk_buff *skb_temp; + struct edma_sw_desc *sw_desc; + int i; + u16 size_remaining; + + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + /* Setup skbuff fields */ + skb->len = length; + + if (likely(num_rfds <= 1)) { + skb->data_len = length; + skb->truesize += edma_cinfo->rx_page_buffer_len; + skb_fill_page_desc(skb, 0, skb_frag_page(frag), + 16, length); + } else { + frag->size -= 16; + skb->data_len = frag->size; + skb->truesize += edma_cinfo->rx_page_buffer_len; + size_remaining = length - frag->size; + + skb_fill_page_desc(skb, 0, skb_frag_page(frag), + 16, frag->size); + + /* clean-up all related sw_descs */ + for (i = 1; i < num_rfds; i++) { + sw_desc = &erdr->sw_desc[sw_next_to_clean]; + skb_temp = sw_desc->skb; + frag = &skb_shinfo(skb_temp)->frags[0]; + dma_unmap_page(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + + if (size_remaining < edma_cinfo->rx_page_buffer_len) + frag->size = size_remaining; + + skb_fill_page_desc(skb, i, skb_frag_page(frag), + 0, frag->size); + + skb_shinfo(skb_temp)->nr_frags = 0; + dev_kfree_skb_any(skb_temp); + + skb->data_len += frag->size; + skb->truesize += edma_cinfo->rx_page_buffer_len; + size_remaining -= frag->size; + + /* Increment SW index */ + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); + (*cleaned_count)++; + } + } + + return sw_next_to_clean; +} + +/* + * edma_rx_complete() + * Main api called from the poll function to process rx packets. + */ +static void edma_rx_complete(struct edma_common_info *edma_cinfo, + int *work_done, int work_to_do, int queue_id, + struct napi_struct *napi) +{ + struct platform_device *pdev = edma_cinfo->pdev; + struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id]; + struct net_device *netdev; + struct edma_adapter *adapter; + struct edma_sw_desc *sw_desc; + struct sk_buff *skb; + struct edma_rx_return_desc *rd; + u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1, + sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0; + u32 data = 0; + u8 *vaddr; + int port_id, i, drop_count = 0; + u32 priority; + u16 count = erdr->count, rfd_avail; + u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3}; + + sw_next_to_clean = erdr->sw_next_to_clean; + + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data); + hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) & + EDMA_RFD_CONS_IDX_MASK; + + do { + while (sw_next_to_clean != hw_next_to_clean) { + if (!work_to_do) + break; + + sw_desc = &erdr->sw_desc[sw_next_to_clean]; + skb = sw_desc->skb; + + /* Unmap the allocated buffer */ + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) + dma_unmap_single(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + else + dma_unmap_page(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + + /* Get RRD */ + if (edma_cinfo->page_mode) { + vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0])); + memcpy((uint8_t *)&rrd[0], vaddr, 16); + rd = (struct edma_rx_return_desc *)rrd; + kunmap_atomic(vaddr); + } else { + rd = (struct edma_rx_return_desc *)skb->data; + } + + /* Check if RRD is valid */ + if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) { + edma_clean_rfd(erdr, sw_next_to_clean); + sw_next_to_clean = (sw_next_to_clean + 1) & + (erdr->count - 1); + cleaned_count++; + continue; + } + + /* Get the number of RFDs from RRD */ + num_rfds = rd->rrd1 & 
EDMA_RRD_NUM_RFD_MASK;
+
+			/* Get Rx port ID from switch */
+			port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK;
+			if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) {
+				dev_err(&pdev->dev, "Invalid RRD source port bit set");
+				for (i = 0; i < num_rfds; i++) {
+					edma_clean_rfd(erdr, sw_next_to_clean);
+					sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
+					cleaned_count++;
+				}
+				continue;
+			}
+
+			/* check if we have a sink for the data we receive.
+			 * If the interface isn't setup, we have to drop the
+			 * incoming data for now.
+			 */
+			netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id];
+			if (!netdev) {
+				edma_clean_rfd(erdr, sw_next_to_clean);
+				sw_next_to_clean = (sw_next_to_clean + 1) &
+						   (erdr->count - 1);
+				cleaned_count++;
+				continue;
+			}
+			adapter = netdev_priv(netdev);
+
+			/* This code is added to handle a use case where a high
+			 * priority stream and a low priority stream are
+			 * received simultaneously on DUT. The problem occurs
+			 * if one of the Rx rings is full and the corresponding
+			 * core is busy with other work. This causes the ESS CPU
+			 * port to backpressure all incoming traffic, including
+			 * the high priority stream. We monitor the free
+			 * descriptor count on each CPU and whenever it reaches
+			 * a threshold (< 80), we drop all low priority traffic
+			 * and let only high priority traffic pass through. We
+			 * can hence avoid the ESS CPU port backpressuring the
+			 * high priority stream.
+			 */
+			priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
+				& EDMA_RRD_PRIORITY_MASK;
+			if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) {
+				rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1);
+				if (rfd_avail < EDMA_RFD_AVAIL_THR) {
+					sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE;
+					sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1);
+					adapter->stats.rx_dropped++;
+					cleaned_count++;
+					drop_count++;
+					if (drop_count == 3) {
+						work_to_do--;
+						(*work_done)++;
+						drop_count = 0;
+					}
+					if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
+						/* If buffer clean count reaches 16, we replenish HW buffers. */
+						ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+						edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+							       sw_next_to_clean);
+						cleaned_count = ret_count;
+					}
+					continue;
+				}
+			}
+
+			work_to_do--;
+			(*work_done)++;
+
+			/* Increment SW index */
+			sw_next_to_clean = (sw_next_to_clean + 1) &
+					   (erdr->count - 1);
+
+			cleaned_count++;
+
+			/* Get the packet size and allocate buffer */
+			length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
+
+			if (edma_cinfo->page_mode) {
+				/* paged skb */
+				sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
+				if (!pskb_may_pull(skb, ETH_HLEN)) {
+					dev_kfree_skb_any(skb);
+					continue;
+				}
+			} else {
+				/* single or fraglist skb */
+
+				/* Addition of 16 bytes is required, as in the packet
+				 * first 16 bytes are rrd descriptors, so actual data
+				 * starts from an offset of 16.
+				 */
+				skb_reserve(skb, 16);
+				if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) {
+					skb_put(skb, length);
+				} else {
+					sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo);
+				}
+			}
+
+			if (edma_stp_rstp) {
+				edma_rx_complete_stp_rstp(skb, port_id, rd);
+			}
+
+			skb->protocol = eth_type_trans(skb, netdev);
+
+			/* Record Rx queue for RFS/RPS and fill flow hash from HW */
+			skb_record_rx_queue(skb, queue_to_rxid[queue_id]);
+			if (netdev->features & NETIF_F_RXHASH) {
+				hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT);
+				if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END))
+					skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4);
+			}
+
+#ifdef CONFIG_NF_FLOW_COOKIE
+			skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
+#endif
+			edma_receive_checksum(rd, skb);
+
+			/* Process VLAN HW acceleration indication provided by HW */
+			if (unlikely(adapter->default_vlan_tag != rd->rrd4)) {
+				vlan = rd->rrd4;
+				if (likely(rd->rrd7 & EDMA_RRD_CVLAN))
+					__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+				else if (rd->rrd1 & EDMA_RRD_SVLAN)
+					__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan);
+			}
+
+			/* Update rx statistics */
+			adapter->stats.rx_packets++;
+			adapter->stats.rx_bytes += length;
+
+			/* Check if we reached refill threshold */
+			if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
+				ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+				edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+					       sw_next_to_clean);
+				cleaned_count = ret_count;
+			}
+
+			/* At this point skb should go to stack */
+			napi_gro_receive(napi, skb);
+		}
+
+		/* Check if we still have NAPI budget */
+		if (!work_to_do)
+			break;
+
+		/* Read index once again since we still have NAPI budget */
+		edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
+		hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) &
+			EDMA_RFD_CONS_IDX_MASK;
+	} while (hw_next_to_clean != sw_next_to_clean);
+
+	erdr->sw_next_to_clean = sw_next_to_clean;
+
+	/* Refill here in case refill threshold wasn't reached */
+	if (likely(cleaned_count)) {
+		ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+		if (ret_count)
+			dev_dbg(&pdev->dev, "Not all buffers were reallocated");
+		edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+			       erdr->sw_next_to_clean);
+	}
+}
+
+/* edma_delete_rfs_filter()
+ *	Remove RFS filter from switch
+ */
+static int edma_delete_rfs_filter(struct edma_adapter *adapter,
+				  struct edma_rfs_filter_node *filter_node)
+{
+	int res = -1;
+
+	struct flow_keys *keys = &filter_node->keys;
+
+	if (likely(adapter->set_rfs_rule))
+		res = (*adapter->set_rfs_rule)(adapter->netdev,
+			flow_get_u32_src(keys), flow_get_u32_dst(keys),
+			keys->ports.src, keys->ports.dst,
+			keys->basic.ip_proto, filter_node->rq_id, 0);
+
+	return res;
+}
+
+/* edma_add_rfs_filter()
+ *	Add RFS filter to switch
+ */
+static int edma_add_rfs_filter(struct edma_adapter *adapter,
+			       struct flow_keys *keys, u16 rq,
+			       struct edma_rfs_filter_node *filter_node)
+{
+	int res = -1;
+
+	struct flow_keys *dest_keys = &filter_node->keys;
+
+	/* Cache the dissected flow keys in the filter node */
+	memcpy(dest_keys, keys, sizeof(*dest_keys));
+
+	/* Call callback registered by ESS driver */
+	if (likely(adapter->set_rfs_rule))
+		res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys),
flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst, + keys->basic.ip_proto, rq, 1); + + return res; +} + +/* edma_rfs_key_search() + * Look for existing RFS entry + */ +static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h, + struct flow_keys *key) +{ + struct edma_rfs_filter_node *p; + + hlist_for_each_entry(p, h, node) + if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) && + flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) && + p->keys.ports.src == key->ports.src && + p->keys.ports.dst == key->ports.dst && + p->keys.basic.ip_proto == key->basic.ip_proto) + return p; + return NULL; +} + +/* edma_initialise_rfs_flow_table() + * Initialise EDMA RFS flow table + */ +static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter) +{ + int i; + + spin_lock_init(&adapter->rfs.rfs_ftab_lock); + + /* Initialize EDMA flow hash table */ + for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) + INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]); + + adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES; + adapter->rfs.filter_available = adapter->rfs.max_num_filter; + adapter->rfs.hashtoclean = 0; + + /* Add timer to get periodic RFS updates from OS */ + timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0); + mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4); +} + +/* edma_free_rfs_flow_table() + * Free EDMA RFS flow table + */ +static void edma_free_rfs_flow_table(struct edma_adapter *adapter) +{ + int i; + + /* Remove sync timer */ + del_timer_sync(&adapter->rfs.expire_rfs); + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + + /* Free EDMA RFS table entries */ + adapter->rfs.filter_available = 0; + + /* Clean-up EDMA flow hash table */ + for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct edma_rfs_filter_node *filter_node; + int res; + + hhead = &adapter->rfs.hlist_head[i]; + hlist_for_each_entry_safe(filter_node, tmp, hhead, node) { + res = edma_delete_rfs_filter(adapter, filter_node); + if (res < 0) + dev_warn(&adapter->netdev->dev, + "EDMA going down but RFS entry %d not allowed to be flushed by Switch", + filter_node->flow_id); + hlist_del(&filter_node->node); + kfree(filter_node); + } + } + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); +} + +/* edma_tx_unmap_and_free() + * clean TX buffer + */ +static inline void edma_tx_unmap_and_free(struct platform_device *pdev, + struct edma_sw_desc *sw_desc) +{ + struct sk_buff *skb = sw_desc->skb; + + if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) || + (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST))) + /* unmap_single for skb head area */ + dma_unmap_single(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_TO_DEVICE); + else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG) + /* unmap page for paged fragments */ + dma_unmap_page(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_TO_DEVICE); + + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST)) + dev_kfree_skb_any(skb); + + sw_desc->flags = 0; +} + +/* edma_tx_complete() + * Used to clean tx queues and update hardware and consumer index + */ +static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + struct edma_sw_desc *sw_desc; + struct platform_device *pdev = edma_cinfo->pdev; + int i; + + u16 sw_next_to_clean = etdr->sw_next_to_clean; + u16 hw_next_to_clean; + u32 data = 0; + + edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data); + hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 
EDMA_TPD_CONS_IDX_MASK; + + /* clean the buffer here */ + while (sw_next_to_clean != hw_next_to_clean) { + sw_desc = &etdr->sw_desc[sw_next_to_clean]; + edma_tx_unmap_and_free(pdev, sw_desc); + sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1); + } + + etdr->sw_next_to_clean = sw_next_to_clean; + + /* update the TPD consumer index register */ + edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean); + + /* Wake the queue if queue is stopped and netdev link is up */ + for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) { + if (netif_tx_queue_stopped(etdr->nq[i])) { + if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i])) + netif_tx_wake_queue(etdr->nq[i]); + } + } +} + +/* edma_get_tx_buffer() + * Get sw_desc corresponding to the TPD + */ +static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo, + struct edma_tx_desc *tpd, int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc]; +} + +/* edma_get_next_tpd() + * Return a TPD descriptor for transfer + */ +static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo, + int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + u16 sw_next_to_fill = etdr->sw_next_to_fill; + struct edma_tx_desc *tpd_desc = + (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]); + + etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1); + + return tpd_desc; +} + +/* edma_tpd_available() + * Check number of free TPDs + */ +static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo, + int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + + u16 sw_next_to_fill; + u16 sw_next_to_clean; + u16 count = 0; + + sw_next_to_clean = etdr->sw_next_to_clean; + sw_next_to_fill = etdr->sw_next_to_fill; + + if (likely(sw_next_to_clean <= sw_next_to_fill)) + count = etdr->count; + + return count + sw_next_to_clean - sw_next_to_fill - 1; +} + +/* edma_tx_queue_get() + * Get the starting number of the queue + */ +static inline int edma_tx_queue_get(struct edma_adapter *adapter, + struct sk_buff *skb, int txq_id) +{ + /* skb->priority is used as an index to skb priority table + * and based on packet priority, correspong queue is assigned. 
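+	 * edma_skb_priority_offset() maps skb->priority 0-3 to offset 0
+	 * and 4-7 to offset 1 within this queue group.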
+ */ + return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb); +} + +/* edma_tx_update_hw_idx() + * update the producer index for the ring transmitted + */ +static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo, + struct sk_buff *skb, int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + u32 tpd_idx_data; + + /* Read and update the producer index */ + edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data); + tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS; + tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK) + << EDMA_TPD_PROD_IDX_SHIFT; + + edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data); +} + +/* edma_rollback_tx() + * Function to retrieve tx resources in case of error + */ +static void edma_rollback_tx(struct edma_adapter *adapter, + struct edma_tx_desc *start_tpd, int queue_id) +{ + struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id]; + struct edma_sw_desc *sw_desc; + struct edma_tx_desc *tpd = NULL; + u16 start_index, index; + + start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc); + + index = start_index; + while (index != etdr->sw_next_to_fill) { + tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]); + sw_desc = &etdr->sw_desc[index]; + edma_tx_unmap_and_free(adapter->pdev, sw_desc); + memset(tpd, 0, sizeof(struct edma_tx_desc)); + if (++index == etdr->count) + index = 0; + } + etdr->sw_next_to_fill = start_index; +} + +/* edma_tx_map_and_fill() + * gets called from edma_xmit_frame + * + * This is where the dma of the buffer to be transmitted + * gets mapped + */ +static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo, + struct edma_adapter *adapter, struct sk_buff *skb, int queue_id, + unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap, + bool packet_is_rstp, int nr_frags) +{ + struct edma_sw_desc *sw_desc = NULL; + struct platform_device *pdev = edma_cinfo->pdev; + struct edma_tx_desc *tpd = NULL, *start_tpd = NULL; + struct sk_buff *iter_skb; + int i = 0; + u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0; + u16 buf_len, lso_desc_len = 0; + + /* It should either be a nr_frags skb or fraglist skb but not both */ + BUG_ON(nr_frags && skb_has_frag_list(skb)); + + if (skb_is_gso(skb)) { + /* TODO: What additional checks need to be performed here */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + lso_word1 |= EDMA_TPD_IPV4_EN; + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + lso_word1 |= EDMA_TPD_LSO_V2_EN; + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } else + return -EINVAL; + + lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) | + (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT); + } else if (flags_transmit & EDMA_HW_CHECKSUM) { + u8 css, cso; + cso = skb_checksum_start_offset(skb); + css = cso + skb->csum_offset; + + word1 |= (EDMA_TPD_CUSTOM_CSUM_EN); + word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT; + word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT); + } + + if (skb->protocol == htons(ETH_P_PPP_SES)) + word1 |= EDMA_TPD_PPPOE_EN; + + if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) { + switch(skb->vlan_proto) { + case htons(ETH_P_8021Q): + word3 |= (1 << EDMA_TX_INS_CVLAN); + word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT; + 
break; + case htons(ETH_P_8021AD): + word1 |= (1 << EDMA_TX_INS_SVLAN); + svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT; + break; + default: + dev_err(&pdev->dev, "no ctag or stag present\n"); + goto vlan_tag_error; + } + } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) { + word3 |= (1 << EDMA_TX_INS_CVLAN); + word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT; + } + + if (packet_is_rstp) { + word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; + word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT; + } else { + word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; + } + + buf_len = skb_headlen(skb); + + if (lso_word1) { + if (lso_word1 & EDMA_TPD_LSO_V2_EN) { + + /* IPv6 LSOv2 descriptor */ + start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id); + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE; + + /* LSOv2 descriptor overrides addr field to pass length */ + tpd->addr = cpu_to_le16(skb->len); + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + } + + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + if (!start_tpd) + start_tpd = tpd; + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + + /* The last buffer info contain the skb address, + * so skb will be freed after unmap + */ + sw_desc->length = lso_desc_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; + + sw_desc->dma = dma_map_single(&adapter->pdev->dev, + skb->data, buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + + /* The last buffer info contain the skb address, + * so it will be freed after unmap + */ + sw_desc->length = lso_desc_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; + + buf_len = 0; + } + + if (likely(buf_len)) { + + /* TODO Do not dequeue descriptor if there is a potential error */ + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + + if (!start_tpd) + start_tpd = tpd; + + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + + /* The last buffer info contain the skb address, + * so it will be free after unmap + */ + sw_desc->length = buf_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; + sw_desc->dma = dma_map_single(&adapter->pdev->dev, + skb->data, buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + } + + /* Walk through all paged fragments */ + while (nr_frags--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + buf_len = skb_frag_size(frag); + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + sw_desc->length = buf_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG; + + sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE); + + if (dma_mapping_error(NULL, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + i++; + } + + /* Walk through all fraglist skbs */ + skb_walk_frags(skb, iter_skb) { + buf_len = iter_skb->len; + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, 
queue_id); + sw_desc->length = buf_len; + sw_desc->dma = dma_map_single(&adapter->pdev->dev, + iter_skb->data, buf_len, DMA_TO_DEVICE); + + if (dma_mapping_error(NULL, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST; + } + + if (tpd) + tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT; + + sw_desc->skb = skb; + sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST; + + return 0; + +dma_error: + edma_rollback_tx(adapter, start_tpd, queue_id); + dev_err(&pdev->dev, "TX DMA map failed\n"); +vlan_tag_error: + return -ENOMEM; +} + +/* edma_check_link() + * check Link status + */ +static int edma_check_link(struct edma_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + if (!(adapter->poll_required)) + return __EDMA_LINKUP; + + if (phydev->link) + return __EDMA_LINKUP; + + return __EDMA_LINKDOWN; +} + +/* edma_adjust_link() + * check for edma link status + */ +void edma_adjust_link(struct net_device *netdev) +{ + int status; + struct edma_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = adapter->phydev; + + if (!test_bit(__EDMA_UP, &adapter->state_flags)) + return; + + status = edma_check_link(adapter); + + if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) { + dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed); + adapter->link_state = __EDMA_LINKUP; + netif_carrier_on(netdev); + if (netif_running(netdev)) + netif_tx_wake_all_queues(netdev); + } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) { + dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name); + adapter->link_state = __EDMA_LINKDOWN; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } +} + +/* edma_get_stats() + * Statistics api used to retreive the tx/rx statistics + */ +struct net_device_stats *edma_get_stats(struct net_device *netdev) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + + return &adapter->stats; +} + +/* edma_xmit() + * Main api to be called by the core for packet transmission + */ +netdev_tx_t edma_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct edma_adapter *adapter = netdev_priv(net_dev); + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + struct edma_tx_desc_ring *etdr; + u16 from_cpu, dp_bitmap, txq_id; + int ret, nr_frags = 0, num_tpds_needed = 1, queue_id; + unsigned int flags_transmit = 0; + bool packet_is_rstp = false; + struct netdev_queue *nq = NULL; + + if (skb_shinfo(skb)->nr_frags) { + nr_frags = skb_shinfo(skb)->nr_frags; + num_tpds_needed += nr_frags; + } else if (skb_has_frag_list(skb)) { + struct sk_buff *iter_skb; + + skb_walk_frags(skb, iter_skb) + num_tpds_needed++; + } + + if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) { + dev_err(&net_dev->dev, + "skb received with fragments %d which is more than %lu", + num_tpds_needed, EDMA_MAX_SKB_FRAGS); + dev_kfree_skb_any(skb); + adapter->stats.tx_errors++; + return NETDEV_TX_OK; + } + + if (edma_stp_rstp) { + u16 ath_hdr, ath_eth_type; + u8 mac_addr[EDMA_ETH_HDR_LEN]; + ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]); + if (ath_eth_type == edma_ath_eth_type) { + packet_is_rstp = true; + ath_hdr = htons(*(uint16_t *)&skb->data[14]); + dp_bitmap = ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK; + from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT; + 
memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN); + + skb_pull(skb, 4); + + memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN); + } + } + + /* this will be one of the 4 TX queues exposed to linux kernel */ + txq_id = skb_get_queue_mapping(skb); + queue_id = edma_tx_queue_get(adapter, skb, txq_id); + etdr = edma_cinfo->tpd_ring[queue_id]; + nq = netdev_get_tx_queue(net_dev, txq_id); + + local_bh_disable(); + /* Tx is not handled in bottom half context. Hence, we need to protect + * Tx from tasks and bottom half + */ + + if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) { + /* not enough descriptor, just stop queue */ + netif_tx_stop_queue(nq); + local_bh_enable(); + dev_dbg(&net_dev->dev, "Not enough descriptors available"); + edma_cinfo->edma_ethstats.tx_desc_error++; + return NETDEV_TX_BUSY; + } + + /* Check and mark VLAN tag offload */ + if (skb_vlan_tag_present(skb)) + flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG; + else if (adapter->default_vlan_tag) + flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG; + + /* Check and mark checksum offload */ + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) + flags_transmit |= EDMA_HW_CHECKSUM; + + /* Map and fill descriptor for Tx */ + ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id, + flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags); + if (ret) { + dev_kfree_skb_any(skb); + adapter->stats.tx_errors++; + goto netdev_okay; + } + + /* Update SW producer index */ + edma_tx_update_hw_idx(edma_cinfo, skb, queue_id); + + /* update tx statistics */ + adapter->stats.tx_packets++; + adapter->stats.tx_bytes += skb->len; + +netdev_okay: + local_bh_enable(); + return NETDEV_TX_OK; +} + +/* + * edma_flow_may_expire() + * Timer function called periodically to delete the node + */ +void edma_flow_may_expire(struct timer_list *t) +{ + struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs); + struct edma_adapter *adapter = + container_of(table, typeof(*adapter), rfs); + int j; + + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct edma_rfs_filter_node *n; + bool res; + + hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++]; + hlist_for_each_entry_safe(n, tmp, hhead, node) { + res = rps_may_expire_flow(adapter->netdev, n->rq_id, + n->flow_id, n->filter_id); + if (res) { + int ret; + ret = edma_delete_rfs_filter(adapter, n); + if (ret < 0) + dev_dbg(&adapter->netdev->dev, + "RFS entry %d not allowed to be flushed by Switch", + n->flow_id); + else { + hlist_del(&n->node); + kfree(n); + adapter->rfs.filter_available++; + } + } + } + } + + adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1); + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); + mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4); +} + +/* edma_rx_flow_steer() + * Called by core to to steer the flow to CPU + */ +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq, u32 flow_id) +{ + struct flow_keys keys; + struct edma_rfs_filter_node *filter_node; + struct edma_adapter *adapter = netdev_priv(dev); + u16 hash_tblid; + int res; + + if (skb->protocol == htons(ETH_P_IPV6)) { + dev_err(&adapter->pdev->dev, "IPv6 not supported\n"); + res = -EINVAL; + goto no_protocol_err; + } + + /* Dissect flow parameters + * We only support IPv4 + TCP/UDP + */ + res = skb_flow_dissect_flow_keys(skb, &keys, 0); + if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) 
{ + res = -EPROTONOSUPPORT; + goto no_protocol_err; + } + + /* Check if table entry exists */ + hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK; + + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys); + + if (filter_node) { + if (rxq == filter_node->rq_id) { + res = -EEXIST; + goto out; + } else { + res = edma_delete_rfs_filter(adapter, filter_node); + if (res < 0) + dev_warn(&adapter->netdev->dev, + "Cannot steer flow %d to different queue", + filter_node->flow_id); + else { + adapter->rfs.filter_available++; + res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); + if (res < 0) { + dev_warn(&adapter->netdev->dev, + "Cannot steer flow %d to different queue", + filter_node->flow_id); + } else { + adapter->rfs.filter_available--; + filter_node->rq_id = rxq; + filter_node->filter_id = res; + } + } + } + } else { + if (adapter->rfs.filter_available == 0) { + res = -EBUSY; + goto out; + } + + filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC); + if (!filter_node) { + res = -ENOMEM; + goto out; + } + + res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); + if (res < 0) { + kfree(filter_node); + goto out; + } + + adapter->rfs.filter_available--; + filter_node->rq_id = rxq; + filter_node->filter_id = res; + filter_node->flow_id = flow_id; + filter_node->keys = keys; + INIT_HLIST_NODE(&filter_node->node); + hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]); + } + +out: + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); +no_protocol_err: + return res; +} + +/* edma_register_rfs_filter() + * Add RFS filter callback + */ +int edma_register_rfs_filter(struct net_device *netdev, + set_rfs_filter_callback_t set_filter) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + + if (adapter->set_rfs_rule) { + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); + return -1; + } + + adapter->set_rfs_rule = set_filter; + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); + + return 0; +} + +/* edma_alloc_tx_rings() + * Allocate rx rings + */ +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo) +{ + struct platform_device *pdev = edma_cinfo->pdev; + int i, err = 0; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); + if (err) { + dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i); + return err; + } + } + + return 0; +} + +/* edma_free_tx_rings() + * Free tx rings + */ +void edma_free_tx_rings(struct edma_common_info *edma_cinfo) +{ + int i; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) + edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); +} + +/* edma_free_tx_resources() + * Free buffers associated with tx rings + */ +void edma_free_tx_resources(struct edma_common_info *edma_cinfo) +{ + struct edma_tx_desc_ring *etdr; + struct edma_sw_desc *sw_desc; + struct platform_device *pdev = edma_cinfo->pdev; + int i, j; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + etdr = edma_cinfo->tpd_ring[i]; + for (j = 0; j < EDMA_TX_RING_SIZE; j++) { + sw_desc = &etdr->sw_desc[j]; + if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD | + EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST)) + edma_tx_unmap_and_free(pdev, sw_desc); + } + } +} + +/* edma_alloc_rx_rings() + * Allocate rx rings + */ +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo) +{ + struct platform_device *pdev = edma_cinfo->pdev; + int i, j, err = 0; + + for (i = 0, j = 0; i < 
edma_cinfo->num_rx_queues; i++) {
+		err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
+		if (err) {
+			dev_err(&pdev->dev, "Rx Queue alloc %u failed\n", i);
+			return err;
+		}
+		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+	}
+
+	return 0;
+}
+
+/* edma_free_rx_rings()
+ *	free rx rings
+ */
+void edma_free_rx_rings(struct edma_common_info *edma_cinfo)
+{
+	int i, j;
+
+	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+		edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]);
+		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+	}
+}
+
+/* edma_free_queues()
+ *	Free the allocated queues
+ */
+void edma_free_queues(struct edma_common_info *edma_cinfo)
+{
+	int i, j;
+
+	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
+		kfree(edma_cinfo->tpd_ring[i]);
+		edma_cinfo->tpd_ring[i] = NULL;
+	}
+
+	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+		kfree(edma_cinfo->rfd_ring[j]);
+		edma_cinfo->rfd_ring[j] = NULL;
+		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+	}
+
+	edma_cinfo->num_rx_queues = 0;
+	edma_cinfo->num_tx_queues = 0;
+}
+
+/* edma_free_rx_resources()
+ *	Free buffers associated with rx rings
+ */
+void edma_free_rx_resources(struct edma_common_info *edma_cinfo)
+{
+	struct edma_rfd_desc_ring *erdr;
+	struct edma_sw_desc *sw_desc;
+	struct platform_device *pdev = edma_cinfo->pdev;
+	int i, j, k;
+
+	for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) {
+		erdr = edma_cinfo->rfd_ring[k];
+		for (j = 0; j < EDMA_RX_RING_SIZE; j++) {
+			sw_desc = &erdr->sw_desc[j];
+			if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) {
+				dma_unmap_single(&pdev->dev, sw_desc->dma,
+						 sw_desc->length, DMA_FROM_DEVICE);
+				edma_clean_rfd(erdr, j);
+			} else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) {
+				dma_unmap_page(&pdev->dev, sw_desc->dma,
+					       sw_desc->length, DMA_FROM_DEVICE);
+				edma_clean_rfd(erdr, j);
+			}
+		}
+		k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+	}
+}
+
+/* edma_alloc_queues_tx()
+ *	Allocate memory for all tx rings
+ */
+int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo)
+{
+	int i;
+
+	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
+		struct edma_tx_desc_ring *etdr;
+
+		etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL);
+		if (!etdr)
+			goto err;
+		etdr->count = edma_cinfo->tx_ring_count;
+		edma_cinfo->tpd_ring[i] = etdr;
+	}
+
+	return 0;
+err:
+	edma_free_queues(edma_cinfo);
+	return -ENOMEM;
+}
+
+/* edma_alloc_queues_rx()
+ *	Allocate memory for all rx rings
+ */
+int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo)
+{
+	int i, j;
+
+	for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) {
+		struct edma_rfd_desc_ring *rfd_ring;
+
+		rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring),
+				   GFP_KERNEL);
+		if (!rfd_ring)
+			goto err;
+		rfd_ring->count = edma_cinfo->rx_ring_count;
+		edma_cinfo->rfd_ring[j] = rfd_ring;
+		j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1);
+	}
+	return 0;
+err:
+	edma_free_queues(edma_cinfo);
+	return -ENOMEM;
+}
+
+/* edma_clear_irq_status()
+ *	Clear interrupt status
+ */
+void edma_clear_irq_status(void)
+{
+	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
+	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
+	edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff);
+	edma_write_reg(EDMA_REG_WOL_ISR, 0x1);
+}
+
+/* edma_configure()
+ *	Configure skb, edma interrupts and control register.
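+ *	Programs the interrupt moderation timers and Tx/Rx control
+ *	registers, pre-fills the Rx rings and initialises the
+ *	descriptor rings.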
+ */ +int edma_configure(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + u32 intr_modrt_data; + u32 intr_ctrl_data = 0; + int i, j, ret_count; + + edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data); + intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT); + intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT; + edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data); + + edma_clear_irq_status(); + + /* Clear any WOL status */ + edma_write_reg(EDMA_REG_WOL_CTRL, 0); + intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT); + intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT); + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); + edma_configure_tx(edma_cinfo); + edma_configure_rx(edma_cinfo); + + /* Allocate the RX buffer */ + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j]; + ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j); + if (ret_count) { + dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n"); + } + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + + /* Configure descriptor Ring */ + edma_init_desc(edma_cinfo); + return 0; +} + +/* edma_irq_enable() + * Enable default interrupt generation settings + */ +void edma_irq_enable(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + int i, j; + + edma_write_reg(EDMA_REG_RX_ISR, 0xff); + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask); + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + edma_write_reg(EDMA_REG_TX_ISR, 0xffff); + for (i = 0; i < edma_cinfo->num_tx_queues; i++) + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask); +} + +/* edma_irq_disable() + * Disable Interrupt + */ +void edma_irq_disable(struct edma_common_info *edma_cinfo) +{ + int i; + + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0); + + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0); + edma_write_reg(EDMA_REG_MISC_IMR, 0); + edma_write_reg(EDMA_REG_WOL_IMR, 0); +} + +/* edma_free_irqs() + * Free All IRQs + */ +void edma_free_irqs(struct edma_adapter *adapter) +{ + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + int i, j; + int k = ((edma_cinfo->num_rx_queues == 4) ? 
1 : 2); + + for (i = 0; i < CONFIG_NR_CPUS; i++) { + for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++) + free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]); + + for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++) + free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]); + } +} + +/* edma_enable_rx_ctrl() + * Enable RX queue control + */ +void edma_enable_rx_ctrl(struct edma_hw *hw) +{ + u32 data; + + edma_read_reg(EDMA_REG_RXQ_CTRL, &data); + data |= EDMA_RXQ_CTRL_EN; + edma_write_reg(EDMA_REG_RXQ_CTRL, data); +} + + +/* edma_enable_tx_ctrl() + * Enable TX queue control + */ +void edma_enable_tx_ctrl(struct edma_hw *hw) +{ + u32 data; + + edma_read_reg(EDMA_REG_TXQ_CTRL, &data); + data |= EDMA_TXQ_CTRL_TXQ_EN; + edma_write_reg(EDMA_REG_TXQ_CTRL, data); +} + +/* edma_stop_rx_tx() + * Disable RX/TQ Queue control + */ +void edma_stop_rx_tx(struct edma_hw *hw) +{ + u32 data; + + edma_read_reg(EDMA_REG_RXQ_CTRL, &data); + data &= ~EDMA_RXQ_CTRL_EN; + edma_write_reg(EDMA_REG_RXQ_CTRL, data); + edma_read_reg(EDMA_REG_TXQ_CTRL, &data); + data &= ~EDMA_TXQ_CTRL_TXQ_EN; + edma_write_reg(EDMA_REG_TXQ_CTRL, data); +} + +/* edma_reset() + * Reset the EDMA + */ +int edma_reset(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + + edma_irq_disable(edma_cinfo); + + edma_clear_irq_status(); + + edma_stop_rx_tx(hw); + + return 0; +} + +/* edma_fill_netdev() + * Fill netdev for each etdr + */ +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id, + int dev, int txq_id) +{ + struct edma_tx_desc_ring *etdr; + int i = 0; + + etdr = edma_cinfo->tpd_ring[queue_id]; + + while (etdr->netdev[i]) + i++; + + if (i >= EDMA_MAX_NETDEV_PER_QUEUE) + return -1; + + /* Populate the netdev associated with the tpd ring */ + etdr->netdev[i] = edma_netdev[dev]; + etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id); + + return 0; +} + +/* edma_set_mac() + * Change the Ethernet Address of the NIC + */ +int edma_set_mac_addr(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + return 0; +} + +/* edma_set_stp_rstp() + * set stp/rstp + */ +void edma_set_stp_rstp(bool rstp) +{ + edma_stp_rstp = rstp; +} + +/* edma_assign_ath_hdr_type() + * assign atheros header eth type + */ +void edma_assign_ath_hdr_type(int eth_type) +{ + edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK; +} + +/* edma_get_default_vlan_tag() + * Used by other modules to get the default vlan tag + */ +int edma_get_default_vlan_tag(struct net_device *netdev) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + + if (adapter->default_vlan_tag) + return adapter->default_vlan_tag; + + return 0; +} + +/* edma_open() + * gets called when netdevice is up, start the queue. 
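+ *	Starts the Tx queues, sets up the RFS flow table and either
+ *	starts PHY polling or forces the link state to UP.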
+ */
+int edma_open(struct net_device *netdev)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ struct platform_device *pdev = adapter->edma_cinfo->pdev;
+
+ netif_tx_start_all_queues(netdev);
+ edma_initialise_rfs_flow_table(adapter);
+ set_bit(__EDMA_UP, &adapter->state_flags);
+
+ /* If link polling is enabled (in our case only for the WAN port),
+ * start the PHY; otherwise always report the link as up
+ */
+ if (adapter->poll_required) {
+ if (!IS_ERR(adapter->phydev)) {
+ phy_start(adapter->phydev);
+ phy_start_aneg(adapter->phydev);
+ adapter->link_state = __EDMA_LINKDOWN;
+ } else {
+ dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
+ }
+ } else {
+ adapter->link_state = __EDMA_LINKUP;
+ netif_carrier_on(netdev);
+ }
+
+ return 0;
+}
+
+/* edma_close()
+ * gets called when netdevice is down, stops the queue.
+ */
+int edma_close(struct net_device *netdev)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+
+ edma_free_rfs_flow_table(adapter);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ if (adapter->poll_required) {
+ if (!IS_ERR(adapter->phydev))
+ phy_stop(adapter->phydev);
+ }
+
+ adapter->link_state = __EDMA_LINKDOWN;
+
+ /* Clear the GMAC UP state bit so that any link state check done
+ * after this point sees the interface as down
+ */
+ clear_bit(__EDMA_UP, &adapter->state_flags);
+
+ return 0;
+}
+
+/* edma_poll()
+ * NAPI polling function, called whenever this napi instance is
+ * scheduled.
+ *
+ * The main sequence of work performed here is:
+ * latch and clear the IRQ status -> complete finished TX descriptors
+ * -> clean the RX rings -> re-enable interrupts.
+ */
+int edma_poll(struct napi_struct *napi, int budget)
+{
+ struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
+ struct edma_per_cpu_queues_info, napi);
+ struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
+ u32 reg_data;
+ u32 shadow_rx_status, shadow_tx_status;
+ int queue_id;
+ int i, work_done = 0;
+
+ /* Store the Rx/Tx status by ANDing it with
+ * the appropriate per-CPU RX/TX mask
+ */
+ edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
+ edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
+ shadow_rx_status = edma_percpu_info->rx_status;
+ edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
+ edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
+ shadow_tx_status = edma_percpu_info->tx_status;
+
+ /* Every core has a tx_start, computed in probe and stored in
+ * edma_percpu_info->tx_start. The masked status bits identify the
+ * tx queues owned by the core on which the current processing is
+ * happening. Since there are 4 tx queues per core, we run the
+ * loop until every pending queue for this core has been cleaned.
+ */
+ while (edma_percpu_info->tx_status) {
+ queue_id = ffs(edma_percpu_info->tx_status) - 1;
+ edma_tx_complete(edma_cinfo, queue_id);
+ edma_percpu_info->tx_status &= ~(1 << queue_id);
+ }
+
+ /* Every core likewise has an rx_start computed in probe. The rx
+ * status bits are handled the same way as the tx status bits
+ * above, except that a core owns at most two rx queues and the
+ * cleaning stops early once the NAPI budget is exhausted.
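+ *
+ * Worked example (illustrative values): if core 0's rx_status is
+ * 0x3, both of its queues are pending; ffs() returns 1 and then
+ * 2, so queue_id evaluates to 0 and then 1, and each bit is
+ * cleared once its queue has been serviced.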
+ */
+ while (edma_percpu_info->rx_status) {
+ queue_id = ffs(edma_percpu_info->rx_status) - 1;
+ edma_rx_complete(edma_cinfo, &work_done,
+ budget, queue_id, napi);
+
+ if (likely(work_done < budget))
+ edma_percpu_info->rx_status &= ~(1 << queue_id);
+ else
+ break;
+ }
+
+ /* Clear the status register so that the interrupts do not fire
+ * again. The clearing is done here because a write to the status
+ * register should only take place once the producer/consumer
+ * index has been updated to reflect that the packet
+ * transmission/reception went fine.
+ */
+ edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status);
+ edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status);
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+
+ /* re-enable the interrupts */
+ for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
+ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1);
+ for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
+ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1);
+ }
+
+ return work_done;
+}
+
+/* edma_interrupt()
+ * interrupt handler
+ */
+irqreturn_t edma_interrupt(int irq, void *dev)
+{
+ struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev;
+ struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
+ int i;
+
+ /* Mask the TX/RX interrupts for this core while its NAPI poll
+ * runs; edma_poll() re-enables them once the budget allows it
+ */
+ for (i = 0; i < edma_cinfo->num_rxq_per_core; i++)
+ edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0);
+
+ for (i = 0; i < edma_cinfo->num_txq_per_core; i++)
+ edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0);
+
+ napi_schedule(&edma_percpu_info->napi);
+
+ return IRQ_HANDLED;
+}
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#ifndef _EDMA_H_ +#define _EDMA_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ess_edma.h" + +#define EDMA_CPU_CORES_SUPPORTED 4 +#define EDMA_MAX_PORTID_SUPPORTED 5 +#define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED +#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1) +#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */ +#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */ + +#define EDMA_MAX_RECEIVE_QUEUE 8 +#define EDMA_MAX_TRANSMIT_QUEUE 16 + +/* WAN/LAN adapter number */ +#define EDMA_WAN 0 +#define EDMA_LAN 1 + +/* VLAN tag */ +#define EDMA_LAN_DEFAULT_VLAN 1 +#define EDMA_WAN_DEFAULT_VLAN 2 + +#define EDMA_DEFAULT_GROUP1_VLAN 1 +#define EDMA_DEFAULT_GROUP2_VLAN 2 +#define EDMA_DEFAULT_GROUP3_VLAN 3 +#define EDMA_DEFAULT_GROUP4_VLAN 4 +#define EDMA_DEFAULT_GROUP5_VLAN 5 + +/* Queues exposed to linux kernel */ +#define EDMA_NETDEV_TX_QUEUE 4 +#define EDMA_NETDEV_RX_QUEUE 4 + +/* Number of queues per core */ +#define EDMA_NUM_TXQ_PER_CORE 4 +#define EDMA_NUM_RXQ_PER_CORE 2 + +#define EDMA_TPD_EOP_SHIFT 31 + +#define EDMA_PORT_ID_SHIFT 12 +#define EDMA_PORT_ID_MASK 0x7 + +/* tpd word 3 bit 18-28 */ +#define EDMA_TPD_PORT_BITMAP_SHIFT 18 + +#define EDMA_TPD_FROM_CPU_SHIFT 25 + +#define EDMA_FROM_CPU_MASK 0x80 +#define EDMA_SKB_PRIORITY_MASK 0x38 + +/* TX/RX descriptor ring count */ +/* should be a power of 2 */ +#define EDMA_RX_RING_SIZE 128 +#define EDMA_TX_RING_SIZE 128 + +/* Flags used in paged/non paged mode */ +#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256 +#define EDMA_RX_HEAD_BUFF_SIZE 1540 + +/* MAX frame size supported by switch */ +#define EDMA_MAX_JUMBO_FRAME_SIZE 9216 + +/* Configurations */ +#define EDMA_INTR_CLEAR_TYPE 0 +#define EDMA_INTR_SW_IDX_W_TYPE 0 +#define EDMA_FIFO_THRESH_TYPE 0 +#define EDMA_RSS_TYPE 0 +#define EDMA_RX_IMT 0x0020 +#define EDMA_TX_IMT 0x0050 +#define EDMA_TPD_BURST 5 +#define EDMA_TXF_BURST 0x100 +#define EDMA_RFD_BURST 8 +#define EDMA_RFD_THR 16 +#define EDMA_RFD_LTHR 0 + +/* RX/TX per CPU based mask/shift */ +#define EDMA_TX_PER_CPU_MASK 0xF +#define EDMA_RX_PER_CPU_MASK 0x3 +#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2 +#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1 +#define EDMA_TX_CPU_START_SHIFT 0x2 +#define EDMA_RX_CPU_START_SHIFT 0x1 + +/* FLags used in transmit direction */ +#define EDMA_HW_CHECKSUM 0x00000001 +#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002 +#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004 + +#define EDMA_SW_DESC_FLAG_LAST 0x1 +#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2 +#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4 +#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8 +#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10 +#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20 + + +#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1) + +/* Ethtool specific list of EDMA supported features */ +#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \ + | SUPPORTED_10baseT_Full \ + | SUPPORTED_100baseT_Half \ + | SUPPORTED_100baseT_Full \ + | SUPPORTED_1000baseT_Full) + +/* Recevie side atheros Header */ +#define EDMA_RX_ATH_HDR_VERSION 0x2 +#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14 +#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11 +#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6 +#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4 + +/* Transmit side atheros Header */ +#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F +#define 
EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80 +#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7 + +#define EDMA_TXQ_START_CORE0 8 +#define EDMA_TXQ_START_CORE1 12 +#define EDMA_TXQ_START_CORE2 0 +#define EDMA_TXQ_START_CORE3 4 + +#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00 +#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000 +#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F +#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0 + +#define EDMA_ETH_HDR_LEN 12 +#define EDMA_ETH_TYPE_MASK 0xFFFF + +#define EDMA_RX_BUFFER_WRITE 16 +#define EDMA_RFD_AVAIL_THR 80 + +#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR + +extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst, + __be16 sport, __be16 dport, + uint8_t proto, u16 loadbalance, bool action); +struct edma_ethtool_statistics { + u32 tx_q0_pkt; + u32 tx_q1_pkt; + u32 tx_q2_pkt; + u32 tx_q3_pkt; + u32 tx_q4_pkt; + u32 tx_q5_pkt; + u32 tx_q6_pkt; + u32 tx_q7_pkt; + u32 tx_q8_pkt; + u32 tx_q9_pkt; + u32 tx_q10_pkt; + u32 tx_q11_pkt; + u32 tx_q12_pkt; + u32 tx_q13_pkt; + u32 tx_q14_pkt; + u32 tx_q15_pkt; + u32 tx_q0_byte; + u32 tx_q1_byte; + u32 tx_q2_byte; + u32 tx_q3_byte; + u32 tx_q4_byte; + u32 tx_q5_byte; + u32 tx_q6_byte; + u32 tx_q7_byte; + u32 tx_q8_byte; + u32 tx_q9_byte; + u32 tx_q10_byte; + u32 tx_q11_byte; + u32 tx_q12_byte; + u32 tx_q13_byte; + u32 tx_q14_byte; + u32 tx_q15_byte; + u32 rx_q0_pkt; + u32 rx_q1_pkt; + u32 rx_q2_pkt; + u32 rx_q3_pkt; + u32 rx_q4_pkt; + u32 rx_q5_pkt; + u32 rx_q6_pkt; + u32 rx_q7_pkt; + u32 rx_q0_byte; + u32 rx_q1_byte; + u32 rx_q2_byte; + u32 rx_q3_byte; + u32 rx_q4_byte; + u32 rx_q5_byte; + u32 rx_q6_byte; + u32 rx_q7_byte; + u32 tx_desc_error; +}; + +struct edma_mdio_data { + struct mii_bus *mii_bus; + void __iomem *membase; + int phy_irq[PHY_MAX_ADDR]; +}; + +/* EDMA LINK state */ +enum edma_link_state { + __EDMA_LINKUP, /* Indicate link is UP */ + __EDMA_LINKDOWN /* Indicate link is down */ +}; + +/* EDMA GMAC state */ +enum edma_gmac_state { + __EDMA_UP /* use to indicate GMAC is up */ +}; + +/* edma transmit descriptor */ +struct edma_tx_desc { + __le16 len; /* full packet including CRC */ + __le16 svlan_tag; /* vlan tag */ + __le32 word1; /* byte 4-7 */ + __le32 addr; /* address of buffer */ + __le32 word3; /* byte 12 */ +}; + +/* edma receive return descriptor */ +struct edma_rx_return_desc { + u16 rrd0; + u16 rrd1; + u16 rrd2; + u16 rrd3; + u16 rrd4; + u16 rrd5; + u16 rrd6; + u16 rrd7; +}; + +/* RFD descriptor */ +struct edma_rx_free_desc { + __le32 buffer_addr; /* buffer address */ +}; + +/* edma hw specific data */ +struct edma_hw { + u32 __iomem *hw_addr; /* inner register address */ + struct edma_adapter *adapter; /* netdevice adapter */ + u32 rx_intr_mask; /*rx interrupt mask */ + u32 tx_intr_mask; /* tx interrupt nask */ + u32 misc_intr_mask; /* misc interrupt mask */ + u32 wol_intr_mask; /* wake on lan interrupt mask */ + bool intr_clear_type; /* interrupt clear */ + bool intr_sw_idx_w; /* interrupt software index */ + u32 rx_head_buff_size; /* Rx buffer size */ + u8 rss_type; /* rss protocol type */ +}; + +/* edma_sw_desc stores software descriptor + * SW descriptor has 1:1 map with HW descriptor + */ +struct edma_sw_desc { + struct sk_buff *skb; + dma_addr_t dma; /* dma address */ + u16 length; /* Tx/Rx buffer length */ + u32 flags; +}; + +/* per core related information */ +struct edma_per_cpu_queues_info { + struct napi_struct napi; /* napi associated with the core */ + u32 tx_mask; /* tx interrupt mask */ + u32 rx_mask; /* rx interrupt mask */ + u32 tx_status; /* tx interrupt status */ + u32 rx_status; /* rx interrupt status */ + u32 
tx_start; /* tx queue start */ + u32 rx_start; /* rx queue start */ + struct edma_common_info *edma_cinfo; /* edma common info */ +}; + +/* edma specific common info */ +struct edma_common_info { + struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */ + struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */ + struct platform_device *pdev; /* device structure */ + struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED]; + struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX]; + struct ctl_table_header *edma_ctl_table_hdr; + int num_gmac; + struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */ + int num_rx_queues; /* number of rx queue */ + u32 num_tx_queues; /* number of tx queue */ + u32 tx_irq[16]; /* number of tx irq */ + u32 rx_irq[8]; /* number of rx irq */ + u32 from_cpu; /* from CPU TPD field */ + u32 num_rxq_per_core; /* Rx queues per core */ + u32 num_txq_per_core; /* Tx queues per core */ + u16 tx_ring_count; /* Tx ring count */ + u16 rx_ring_count; /* Rx ring*/ + u16 rx_head_buffer_len; /* rx buffer length */ + u16 rx_page_buffer_len; /* rx buffer length */ + u32 page_mode; /* Jumbo frame supported flag */ + u32 fraglist_mode; /* fraglist supported flag */ + struct edma_hw hw; /* edma hw specific structure */ + struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */ + spinlock_t stats_lock; /* protect edma stats area for updation */ + struct timer_list edma_stats_timer; +}; + +/* transimit packet descriptor (tpd) ring */ +struct edma_tx_desc_ring { + struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */ + struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE]; + /* Array of netdevs associated with the tpd ring */ + void *hw_desc; /* descriptor ring virtual address */ + struct edma_sw_desc *sw_desc; /* buffer associated with ring */ + int netdev_bmp; /* Bitmap for per-ring netdevs */ + u32 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 sw_next_to_fill; /* next Tx descriptor to fill */ + u16 sw_next_to_clean; /* next Tx descriptor to clean */ +}; + +/* receive free descriptor (rfd) ring */ +struct edma_rfd_desc_ring { + void *hw_desc; /* descriptor ring virtual address */ + struct edma_sw_desc *sw_desc; /* buffer associated with ring */ + u16 size; /* bytes allocated to sw_desc */ + u16 count; /* number of descriptors in the ring */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 sw_next_to_fill; /* next descriptor to fill */ + u16 sw_next_to_clean; /* next descriptor to clean */ +}; + +/* edma_rfs_flter_node - rfs filter node in hash table */ +struct edma_rfs_filter_node { + struct flow_keys keys; + u32 flow_id; /* flow_id of filter provided by kernel */ + u16 filter_id; /* filter id of filter returned by adaptor */ + u16 rq_id; /* desired rq index */ + struct hlist_node node; /* edma rfs list node */ +}; + +/* edma_rfs_flow_tbl - rfs flow table */ +struct edma_rfs_flow_table { + u16 max_num_filter; /* Maximum number of filters edma supports */ + u16 hashtoclean; /* hash table index to clean next */ + int filter_available; /* Number of free filters available */ + struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES]; + spinlock_t rfs_ftab_lock; + struct timer_list expire_rfs; /* timer function for edma_rps_may_expire_flow */ +}; + +/* EDMA net device structure */ +struct edma_adapter { + struct net_device *netdev; /* netdevice */ + struct platform_device *pdev; /* 
platform device */ + struct edma_common_info *edma_cinfo; /* edma common info */ + struct phy_device *phydev; /* Phy device */ + struct edma_rfs_flow_table rfs; /* edma rfs flow table */ + struct net_device_stats stats; /* netdev statistics */ + set_rfs_filter_callback_t set_rfs_rule; + u32 flags;/* status flags */ + unsigned long state_flags; /* GMAC up/down flags */ + u32 forced_speed; /* link force speed */ + u32 forced_duplex; /* link force duplex */ + u32 link_state; /* phy link state */ + u32 phy_mdio_addr; /* PHY device address on MII interface */ + u32 poll_required; /* check if link polling is required */ + u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */ + u32 default_vlan_tag; /* vlan tag */ + u32 dp_bitmap; + uint8_t phy_id[MII_BUS_ID_SIZE + 3]; +}; + +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo); +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo); +int edma_open(struct net_device *netdev); +int edma_close(struct net_device *netdev); +void edma_free_tx_resources(struct edma_common_info *edma_c_info); +void edma_free_rx_resources(struct edma_common_info *edma_c_info); +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo); +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo); +void edma_free_tx_rings(struct edma_common_info *edma_cinfo); +void edma_free_rx_rings(struct edma_common_info *edma_cinfo); +void edma_free_queues(struct edma_common_info *edma_cinfo); +void edma_irq_disable(struct edma_common_info *edma_cinfo); +int edma_reset(struct edma_common_info *edma_cinfo); +int edma_poll(struct napi_struct *napi, int budget); +netdev_tx_t edma_xmit(struct sk_buff *skb, + struct net_device *netdev); +int edma_configure(struct edma_common_info *edma_cinfo); +void edma_irq_enable(struct edma_common_info *edma_cinfo); +void edma_enable_tx_ctrl(struct edma_hw *hw); +void edma_enable_rx_ctrl(struct edma_hw *hw); +void edma_stop_rx_tx(struct edma_hw *hw); +void edma_free_irqs(struct edma_adapter *adapter); +irqreturn_t edma_interrupt(int irq, void *dev); +void edma_write_reg(u16 reg_addr, u32 reg_value); +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value); +struct net_device_stats *edma_get_stats(struct net_device *netdev); +int edma_set_mac_addr(struct net_device *netdev, void *p); +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq, u32 flow_id); +int edma_register_rfs_filter(struct net_device *netdev, + set_rfs_filter_callback_t set_filter); +void edma_flow_may_expire(struct timer_list *t); +void edma_set_ethtool_ops(struct net_device *netdev); +void edma_set_stp_rstp(bool tag); +void edma_assign_ath_hdr_type(int tag); +int edma_get_default_vlan_tag(struct net_device *netdev); +void edma_adjust_link(struct net_device *netdev); +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id); +void edma_read_append_stats(struct edma_common_info *edma_cinfo); +void edma_change_tx_coalesce(int usecs); +void edma_change_rx_coalesce(int usecs); +void edma_get_tx_rx_coalesce(u32 *reg_val); +void edma_clear_irq_status(void); +#endif /* _EDMA_H_ */ --- /dev/null +++ b/drivers/net/ethernet/qualcomm/essedma/edma_axi.c @@ -0,0 +1,1216 @@ +/* + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include
+#include
+#include
+#include
+#include "edma.h"
+#include "ess_edma.h"
+
+/* Weight round robin and virtual QID mask */
+#define EDMA_WRR_VID_SCTL_MASK 0xffff
+
+/* Weight round robin and virtual QID shift */
+#define EDMA_WRR_VID_SCTL_SHIFT 16
+
+char edma_axi_driver_name[] = "ess_edma";
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+
+static u32 edma_hw_addr;
+
+char edma_tx_irq[16][64];
+char edma_rx_irq[8][64];
+struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
+static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
+ EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
+static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
+ EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
+
+static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
+static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
+static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
+static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
+static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
+static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
+static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
+static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
+static u32 edma_rss_idt_idx;
+
+static int edma_weight_assigned_to_q __read_mostly;
+static int edma_queue_to_virtual_q __read_mostly;
+static bool edma_enable_rstp __read_mostly;
+static int edma_athr_hdr_eth_type __read_mostly;
+
+static int page_mode;
+module_param(page_mode, int, 0);
+MODULE_PARM_DESC(page_mode, "enable page mode");
+
+static int overwrite_mode;
+module_param(overwrite_mode, int, 0);
+MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
+
+static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
+module_param(jumbo_mru, int, 0);
+MODULE_PARM_DESC(jumbo_mru, "jumbo MRU; a non-zero value enables fraglist support");
+
+static int num_rxq = 4;
+module_param(num_rxq, int, 0);
+MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
+
+void edma_write_reg(u16 reg_addr, u32 reg_value)
+{
+ writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
+}
+
+void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
+{
+ *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
+}
+
+/* edma_change_tx_coalesce()
+ * change tx interrupt moderation timer
+ */
+void edma_change_tx_coalesce(int usecs)
+{
+ u32 reg_value;
+
+ /* Right shift the value from the user by 1: the IMT resolution
+ * is 2 usecs, so one count of this register corresponds to
+ * 2 usecs.
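+ *
+ * Worked example (illustrative): a request of 100 usecs is
+ * written as 100 >> 1 = 50 counts, and the hardware timer then
+ * expires after 50 * 2 = 100 usecs.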
+ */ + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, ®_value); + reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16)); + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value); +} + +/* edma_change_rx_coalesce() + * change rx interrupt moderation timer + */ +void edma_change_rx_coalesce(int usecs) +{ + u32 reg_value; + + /* Here, we right shift the value from the user by 1, this is + * done because IMT resolution timer is 2usecs. 1 count + * of this register corresponds to 2 usecs. + */ + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, ®_value); + reg_value = ((reg_value & 0xffff0000) | (usecs >> 1)); + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value); +} + +/* edma_get_tx_rx_coalesce() + * Get tx/rx interrupt moderation value + */ +void edma_get_tx_rx_coalesce(u32 *reg_val) +{ + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val); +} + +void edma_read_append_stats(struct edma_common_info *edma_cinfo) +{ + uint32_t *p; + int i; + u32 stat; + + spin_lock_bh(&edma_cinfo->stats_lock); + p = (uint32_t *)&(edma_cinfo->edma_ethstats); + + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) { + edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat); + *p += stat; + p++; + } + + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) { + edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat); + *p += stat; + p++; + } + + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) { + edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat); + *p += stat; + p++; + } + + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) { + edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat); + *p += stat; + p++; + } + + spin_unlock_bh(&edma_cinfo->stats_lock); +} + +static void edma_statistics_timer(struct timer_list *t) +{ + struct edma_common_info *edma_cinfo = + from_timer(edma_cinfo, t, edma_stats_timer); + + edma_read_append_stats(edma_cinfo); + + mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ); +} + +static int edma_enable_stp_rstp(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) + edma_set_stp_rstp(edma_enable_rstp); + + return ret; +} + +static int edma_ath_hdr_eth_type(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) + edma_assign_ath_hdr_type(edma_athr_hdr_eth_type); + + return ret; +} + +static int edma_change_default_lan_vlan(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + int ret; + + if (!edma_netdev[1]) { + pr_err("Netdevice for default_lan does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[1]); + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_ltag; + + return ret; +} + +static int edma_change_default_wan_vlan(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + int ret; + + if (!edma_netdev[0]) { + pr_err("Netdevice for default_wan does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[0]); + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_wtag; + + return ret; +} + +static int edma_change_group1_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int 
ret; + + if (!edma_netdev[0]) { + pr_err("Netdevice for Group 1 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[0]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group1_vtag; + + return ret; +} + +static int edma_change_group2_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[1]) { + pr_err("Netdevice for Group 2 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[1]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group2_vtag; + + return ret; +} + +static int edma_change_group3_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[2]) { + pr_err("Netdevice for Group 3 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[2]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group3_vtag; + + return ret; +} + +static int edma_change_group4_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[3]) { + pr_err("Netdevice for Group 4 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[3]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group4_vtag; + + return ret; +} + +static int edma_change_group5_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[4]) { + pr_err("Netdevice for Group 5 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[4]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group5_vtag; + + return ret; +} + +static int edma_set_rss_idt_value(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write && !ret) + edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx), + edma_rss_idt_val); + return ret; +} + +static int edma_set_rss_idt_idx(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + u32 old_value = edma_rss_idt_idx; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (!write || ret) + return ret; + + if (edma_rss_idt_idx >= EDMA_NUM_IDT) { + pr_err("Invalid RSS indirection table index %d\n", + edma_rss_idt_idx); + edma_rss_idt_idx = old_value; + return -EINVAL; + } + return ret; +} + +static int edma_weight_assigned_to_queues(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret, queue_id, weight; + u32 reg_data, data, reg_addr; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) { + queue_id = edma_weight_assigned_to_q & 
EDMA_WRR_VID_SCTL_MASK; + if (queue_id < 0 || queue_id > 15) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT; + if (weight < 0 || weight > 0xF) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + data = weight << EDMA_WRR_SHIFT(queue_id); + + reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3); + edma_read_reg(reg_addr, ®_data); + reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id)); + edma_write_reg(reg_addr, data | reg_data); + } + + return ret; +} + +static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret, queue_id, virtual_qid; + u32 reg_data, data, reg_addr; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) { + queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK; + if (queue_id < 0 || queue_id > 15) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + virtual_qid = edma_queue_to_virtual_q >> + EDMA_WRR_VID_SCTL_SHIFT; + if (virtual_qid < 0 || virtual_qid > 8) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id); + + reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3); + edma_read_reg(reg_addr, ®_data); + reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id)); + edma_write_reg(reg_addr, data | reg_data); + } + + return ret; +} + +static struct ctl_table edma_table[] = { + { + .procname = "default_lan_tag", + .data = &edma_default_ltag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_default_lan_vlan + }, + { + .procname = "default_wan_tag", + .data = &edma_default_wtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_default_wan_vlan + }, + { + .procname = "weight_assigned_to_queues", + .data = &edma_weight_assigned_to_q, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_weight_assigned_to_queues + }, + { + .procname = "queue_to_virtual_queue_map", + .data = &edma_queue_to_virtual_q, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_queue_to_virtual_queue_map + }, + { + .procname = "enable_stp_rstp", + .data = &edma_enable_rstp, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_enable_stp_rstp + }, + { + .procname = "athr_hdr_eth_type", + .data = &edma_athr_hdr_eth_type, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_ath_hdr_eth_type + }, + { + .procname = "default_group1_vlan_tag", + .data = &edma_default_group1_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group1_vtag + }, + { + .procname = "default_group2_vlan_tag", + .data = &edma_default_group2_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group2_vtag + }, + { + .procname = "default_group3_vlan_tag", + .data = &edma_default_group3_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group3_vtag + }, + { + .procname = "default_group4_vlan_tag", + .data = &edma_default_group4_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group4_vtag + }, + { + .procname = "default_group5_vlan_tag", + .data = &edma_default_group5_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group5_vtag + }, + { + .procname = "edma_rss_idt_value", + .data = &edma_rss_idt_val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_set_rss_idt_value + }, + { + .procname = "edma_rss_idt_idx", + .data = 
&edma_rss_idt_idx, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_set_rss_idt_idx + }, + {} +}; + +/* edma_axi_netdev_ops + * Describe the operations supported by registered netdevices + * + * static const struct net_device_ops edma_axi_netdev_ops = { + * .ndo_open = edma_open, + * .ndo_stop = edma_close, + * .ndo_start_xmit = edma_xmit_frame, + * .ndo_set_mac_address = edma_set_mac_addr, + * } + */ +static const struct net_device_ops edma_axi_netdev_ops = { + .ndo_open = edma_open, + .ndo_stop = edma_close, + .ndo_start_xmit = edma_xmit, + .ndo_set_mac_address = edma_set_mac_addr, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = edma_rx_flow_steer, + .ndo_register_rfs_filter = edma_register_rfs_filter, + .ndo_get_default_vlan_tag = edma_get_default_vlan_tag, +#endif + .ndo_get_stats = edma_get_stats, +}; + +/* edma_axi_probe() + * Initialise an adapter identified by a platform_device structure. + * + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur in the probe. + */ +static int edma_axi_probe(struct platform_device *pdev) +{ + struct edma_common_info *edma_cinfo; + struct edma_hw *hw; + struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED]; + struct resource *res; + struct device_node *np = pdev->dev.of_node; + struct device_node *pnp; + struct device_node *mdio_node = NULL; + struct platform_device *mdio_plat = NULL; + struct mii_bus *miibus = NULL; + struct edma_mdio_data *mdio_data = NULL; + int i, j, k, err = 0; + int portid_bmp; + int idx = 0, idx_mac = 0; + + if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) { + dev_err(&pdev->dev, "Invalid CPU Cores\n"); + return -EINVAL; + } + + if ((num_rxq != 4) && (num_rxq != 8)) { + dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n"); + return -EINVAL; + } + edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL); + if (!edma_cinfo) { + err = -ENOMEM; + goto err_alloc; + } + + edma_cinfo->pdev = pdev; + + of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac); + if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) { + pr_err("Invalid DTSI Entry for qcom,num_gmac\n"); + err = -EINVAL; + goto err_cinfo; + } + + /* Initialize the netdev array before allocation + * to avoid double free + */ + for (i = 0 ; i < edma_cinfo->num_gmac ; i++) + edma_netdev[i] = NULL; + + for (i = 0 ; i < edma_cinfo->num_gmac ; i++) { + edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter), + EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE); + + if (!edma_netdev[i]) { + dev_err(&pdev->dev, + "net device alloc fails for index=%d\n", i); + err = -ENODEV; + goto err_ioremap; + } + + SET_NETDEV_DEV(edma_netdev[i], &pdev->dev); + platform_set_drvdata(pdev, edma_netdev[i]); + edma_cinfo->netdev[i] = edma_netdev[i]; + } + + /* Fill ring details */ + edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE; + edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4); + edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE; + + /* Update num rx queues based on module parameter */ + edma_cinfo->num_rx_queues = num_rxq; + edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 
1 : 2); + + edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE; + + hw = &edma_cinfo->hw; + + /* Fill HW defaults */ + hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK; + hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK; + + of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode); + of_property_read_u32(np, "qcom,rx_head_buf_size", + &hw->rx_head_buff_size); + + if (overwrite_mode) { + dev_info(&pdev->dev, "page mode overwritten"); + edma_cinfo->page_mode = page_mode; + } + + if (jumbo_mru) + edma_cinfo->fraglist_mode = 1; + + if (edma_cinfo->page_mode) + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO; + else if (edma_cinfo->fraglist_mode) + hw->rx_head_buff_size = jumbo_mru; + else if (!hw->rx_head_buff_size) + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE; + + hw->misc_intr_mask = 0; + hw->wol_intr_mask = 0; + + hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE; + hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE; + + /* configure RSS type to the different protocol that can be + * supported + */ + hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP | + EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP | + EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(edma_cinfo->hw.hw_addr)) { + err = PTR_ERR(edma_cinfo->hw.hw_addr); + goto err_ioremap; + } + + edma_hw_addr = (u32)edma_cinfo->hw.hw_addr; + + /* Parse tx queue interrupt number from device tree */ + for (i = 0; i < edma_cinfo->num_tx_queues; i++) + edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i); + + /* Parse rx queue interrupt number from device tree + * Here we are setting j to point to the point where we + * left tx interrupt parsing(i.e 16) and run run the loop + * from 0 to 7 to parse rx interrupt number. + */ + for (i = 0, j = edma_cinfo->num_tx_queues, k = 0; + i < edma_cinfo->num_rx_queues; i++) { + edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j); + k += ((num_rxq == 4) ? 2 : 1); + j += ((num_rxq == 4) ? 
2 : 1); + } + + edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size; + edma_cinfo->rx_page_buffer_len = PAGE_SIZE; + + err = edma_alloc_queues_tx(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of TX queue failed\n"); + goto err_tx_qinit; + } + + err = edma_alloc_queues_rx(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of RX queue failed\n"); + goto err_rx_qinit; + } + + err = edma_alloc_tx_rings(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of TX resources failed\n"); + goto err_tx_rinit; + } + + err = edma_alloc_rx_rings(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of RX resources failed\n"); + goto err_rx_rinit; + } + + /* Initialize netdev and netdev bitmap for transmit descriptor rings */ + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i]; + int j; + + etdr->netdev_bmp = 0; + for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) { + etdr->netdev[j] = NULL; + etdr->nq[j] = NULL; + } + } + + if (of_property_read_bool(np, "qcom,mdio_supported")) { + mdio_node = of_find_compatible_node(NULL, NULL, + "qcom,ipq4019-mdio"); + if (!mdio_node) { + dev_err(&pdev->dev, "cannot find mdio node by phandle"); + err = -EIO; + goto err_mdiobus_init_fail; + } + + mdio_plat = of_find_device_by_node(mdio_node); + if (!mdio_plat) { + dev_err(&pdev->dev, + "cannot find platform device from mdio node"); + of_node_put(mdio_node); + err = -EIO; + goto err_mdiobus_init_fail; + } + + mdio_data = dev_get_drvdata(&mdio_plat->dev); + if (!mdio_data) { + dev_err(&pdev->dev, + "cannot get mii bus reference from device data"); + of_node_put(mdio_node); + err = -EIO; + goto err_mdiobus_init_fail; + } + + miibus = mdio_data->mii_bus; + } + + for_each_available_child_of_node(np, pnp) { + const char *mac_addr; + + /* this check is needed if parent and daughter dts have + * different number of gmac nodes + */ + if (idx_mac == edma_cinfo->num_gmac) { + of_node_put(np); + break; + } + + mac_addr = of_get_mac_address(pnp); + if (mac_addr) + memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN); + + idx_mac++; + } + + /* Populate the adapter structure register the netdevice */ + for (i = 0; i < edma_cinfo->num_gmac; i++) { + int k, m; + + adapter[i] = netdev_priv(edma_netdev[i]); + adapter[i]->netdev = edma_netdev[i]; + adapter[i]->pdev = pdev; + for (j = 0; j < CONFIG_NR_CPUS; j++) { + m = i % 2; + adapter[i]->tx_start_offset[j] = + ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1)); + /* Share the queues with available net-devices. + * For instance , with 5 net-devices + * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13 + * and eth1/eth3 will get the remaining. 
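+ *
+ * Worked example, derived from the offset formula above for the
+ * 2 net-device case: m = i % 2, so eth0 (m = 0) is given q0,q1
+ * on each core (q0,q1,q4,q5,q8,q9,q12,q13 overall) and eth1
+ * (m = 1) is given q2,q3 on each core (q2,q3,q6,q7,q10,q11,
+ * q14,q15 overall).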
+ */
+ for (k = adapter[i]->tx_start_offset[j]; k <
+ (adapter[i]->tx_start_offset[j] + 2); k++) {
+ if (edma_fill_netdev(edma_cinfo, k, i, j)) {
+ pr_err("Netdev overflow Error\n");
+ goto err_register;
+ }
+ }
+ }
+
+ adapter[i]->edma_cinfo = edma_cinfo;
+ edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
+ edma_netdev[i]->max_mtu = 9000;
+ edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
+ | NETIF_F_HW_VLAN_CTAG_TX
+ | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
+ edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX
+ | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GRO;
+ edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GRO;
+ edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GRO;
+
+#ifdef CONFIG_RFS_ACCEL
+ edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
+ edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
+ edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
+ edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
+#endif
+ edma_set_ethtool_ops(edma_netdev[i]);
+
+ /* Fill in a random default MAC address if the device does not
+ * already have a valid one
+ */
+ if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
+ random_ether_addr(edma_netdev[i]->dev_addr);
+ pr_info("EDMA using random MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+ *(edma_netdev[i]->dev_addr),
+ *(edma_netdev[i]->dev_addr + 1),
+ *(edma_netdev[i]->dev_addr + 2),
+ *(edma_netdev[i]->dev_addr + 3),
+ *(edma_netdev[i]->dev_addr + 4),
+ *(edma_netdev[i]->dev_addr + 5));
+ }
+
+ err = register_netdev(edma_netdev[i]);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to
+ * ethtool even BEFORE open
+ */
+ netif_carrier_off(edma_netdev[i]);
+
+ /* Allocate reverse irq cpu mapping structure for
+ * receive queues
+ */
+#ifdef CONFIG_RFS_ACCEL
+ edma_netdev[i]->rx_cpu_rmap =
+ alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
+ if (!edma_netdev[i]->rx_cpu_rmap) {
+ err = -ENOMEM;
+ goto err_rmap_alloc_fail;
+ }
+#endif
+ }
+
+ for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
+ edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
+
+ for_each_available_child_of_node(np, pnp) {
+ const uint32_t *vlan_tag = NULL;
+ int len;
+
+ /* this check is needed if parent and daughter dts have
+ * different number of gmac nodes
+ */
+ if (idx == edma_cinfo->num_gmac)
+ break;
+
+ /* Populate port-id to netdev lookup table */
+ vlan_tag = of_get_property(pnp, "vlan_tag", &len);
+ if (!vlan_tag) {
+ pr_err("Vlan tag parsing Failed.\n");
+ goto err_rmap_alloc_fail;
+ }
+
+ adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
+ vlan_tag++;
+ portid_bmp = of_read_number(vlan_tag, 1);
+ adapter[idx]->dp_bitmap = portid_bmp;
+
+ portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
+ while (portid_bmp) {
+ int port_bit = ffs(portid_bmp);
+
+ if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
+ goto err_rmap_alloc_fail;
+ edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
+ edma_netdev[idx];
+ portid_bmp &= ~(1 << (port_bit - 1));
+ }
+
+ if (!of_property_read_u32(pnp, "qcom,poll_required",
+ &adapter[idx]->poll_required)) {
+ if (adapter[idx]->poll_required) {
+ of_property_read_u32(pnp, "qcom,phy_mdio_addr",
+ &adapter[idx]->phy_mdio_addr);
+ of_property_read_u32(pnp, "qcom,forced_speed",
+ &adapter[idx]->forced_speed);
+ of_property_read_u32(pnp, "qcom,forced_duplex",
+ &adapter[idx]->forced_duplex);
+
+ /* create a phyid using MDIO bus id
+ * and MDIO bus address
+ */
+ snprintf(adapter[idx]->phy_id,
+ MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ miibus->id,
+ adapter[idx]->phy_mdio_addr);
+ }
+ } else {
+ adapter[idx]->poll_required = 0;
+ adapter[idx]->forced_speed = SPEED_1000;
+ adapter[idx]->forced_duplex = DUPLEX_FULL;
+ }
+
+ idx++;
+ }
+
+ edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
+ "net/edma",
+ edma_table);
+ if (!edma_cinfo->edma_ctl_table_hdr) {
+ dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
+ goto err_unregister_sysctl_tbl;
+ }
+
+ /* Disable all 16 TX and 8 RX IRQs */
+ edma_irq_disable(edma_cinfo);
+
+ err = edma_reset(edma_cinfo);
+ if (err) {
+ err = -EIO;
+ goto err_reset;
+ }
+
+ /* populate per-core info, do a netif_napi_add(), request the
+ * 16 TX IRQs and 8 RX IRQs, then enable NAPI
+ */
+ for (i = 0; i < CONFIG_NR_CPUS; i++) {
+ u8 rx_start;
+
+ edma_cinfo->edma_percpu_info[i].napi.state = 0;
+
+ netif_napi_add(edma_netdev[0],
+ &edma_cinfo->edma_percpu_info[i].napi,
+ edma_poll, 64);
+ napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
+ edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
+ edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
+ << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
+ edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
+ edma_cinfo->edma_percpu_info[i].rx_start =
+ i << EDMA_RX_CPU_START_SHIFT;
+ rx_start = i << EDMA_RX_CPU_START_SHIFT;
+ edma_cinfo->edma_percpu_info[i].tx_status = 0;
+ edma_cinfo->edma_percpu_info[i].rx_status = 0;
+ edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
+
+ /* Request irq per core */
+ for (j = edma_cinfo->edma_percpu_info[i].tx_start;
+ j < tx_start[i] + 4; j++) {
+ sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
+ err = request_irq(edma_cinfo->tx_irq[j],
+ edma_interrupt,
+ 0,
+ &edma_tx_irq[j][0],
+ &edma_cinfo->edma_percpu_info[i]);
+ if (err)
+ goto err_reset;
+ }
+
+ for (j = edma_cinfo->edma_percpu_info[i].rx_start;
+ j < (rx_start +
+ ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
+ j++) {
+ sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
+ err = request_irq(edma_cinfo->rx_irq[j],
+ edma_interrupt,
+ 0,
+ &edma_rx_irq[j][0],
+ &edma_cinfo->edma_percpu_info[i]);
+ if (err)
+ goto err_reset;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ for (j = edma_cinfo->edma_percpu_info[i].rx_start;
+ j < rx_start + 2; j += 2) {
+ err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
+ edma_cinfo->rx_irq[j]);
+ if (err)
+ goto err_rmap_add_fail;
+ }
+#endif
+ }
+
+ /* Clear the interrupt status, allocate rx buffers and
+ * configure the edma descriptor registers
+ */
+ err = edma_configure(edma_cinfo);
+ if (err) {
+ err = -EIO;
+ goto err_configure;
+ }
+
+ /* Configure the RSS indirection table.
+ * 128 hash entries are configured in the following
+ * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
+ * and so on
+ */
+ for (i = 0; i < EDMA_NUM_IDT; i++)
+ edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
+
+ /* Configure the load balance mapping table.
+ * 4 table entries will be configured according to the
+ * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
+ * respectively.
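+ *
+ * (A note on the encoding, inferred from this call site rather
+ * than from register documentation: the single EDMA_REG_LB_RING
+ * write below suggests all four entries are packed into the one
+ * 32-bit EDMA_LB_REG_VALUE constant from ess_edma.h.)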
+ */ + edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE); + + /* Configure Virtual queue for Tx rings + * User can also change this value runtime through + * a sysctl + */ + edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE); + edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE); + + /* Configure Max AXI Burst write size to 128 bytes*/ + edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE, + EDMA_AXIW_MAXWRSIZE_VALUE); + + /* Enable All 16 tx and 8 rx irq mask */ + edma_irq_enable(edma_cinfo); + edma_enable_tx_ctrl(&edma_cinfo->hw); + edma_enable_rx_ctrl(&edma_cinfo->hw); + + for (i = 0; i < edma_cinfo->num_gmac; i++) { + if (adapter[i]->poll_required) { + adapter[i]->phydev = + phy_connect(edma_netdev[i], + (const char *)adapter[i]->phy_id, + &edma_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (IS_ERR(adapter[i]->phydev)) { + dev_dbg(&pdev->dev, "PHY attach FAIL"); + err = -EIO; + goto edma_phy_attach_fail; + } else { + adapter[i]->phydev->advertising |= + ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + adapter[i]->phydev->supported |= + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + } + } else { + adapter[i]->phydev = NULL; + } + } + + spin_lock_init(&edma_cinfo->stats_lock); + + timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0); + mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ); + + return 0; + +edma_phy_attach_fail: + miibus = NULL; +err_configure: +#ifdef CONFIG_RFS_ACCEL + for (i = 0; i < edma_cinfo->num_gmac; i++) { + free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap); + adapter[i]->netdev->rx_cpu_rmap = NULL; + } +#endif +err_rmap_add_fail: + edma_free_irqs(adapter[0]); + for (i = 0; i < CONFIG_NR_CPUS; i++) + napi_disable(&edma_cinfo->edma_percpu_info[i].napi); +err_reset: +err_unregister_sysctl_tbl: +err_rmap_alloc_fail: + for (i = 0; i < edma_cinfo->num_gmac; i++) + unregister_netdev(edma_netdev[i]); +err_register: +err_mdiobus_init_fail: + edma_free_rx_rings(edma_cinfo); +err_rx_rinit: + edma_free_tx_rings(edma_cinfo); +err_tx_rinit: + edma_free_queues(edma_cinfo); +err_rx_qinit: +err_tx_qinit: + iounmap(edma_cinfo->hw.hw_addr); +err_ioremap: + for (i = 0; i < edma_cinfo->num_gmac; i++) { + if (edma_netdev[i]) + free_netdev(edma_netdev[i]); + } +err_cinfo: + kfree(edma_cinfo); +err_alloc: + return err; +} + +/* edma_axi_remove() + * Device Removal Routine + * + * edma_axi_remove is called by the platform subsystem to alert the driver + * that it should release a platform device. 
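+ *
+ * The teardown below mirrors probe in reverse: netdevs are
+ * unregistered first, then DMA and NAPI are stopped, IRQs and the
+ * sysctl table are released, and finally the rings, queues and the
+ * common info structure are freed.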
+ */ +static int edma_axi_remove(struct platform_device *pdev) +{ + struct edma_adapter *adapter = netdev_priv(edma_netdev[0]); + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + struct edma_hw *hw = &edma_cinfo->hw; + int i; + + for (i = 0; i < edma_cinfo->num_gmac; i++) + unregister_netdev(edma_netdev[i]); + + edma_stop_rx_tx(hw); + for (i = 0; i < CONFIG_NR_CPUS; i++) + napi_disable(&edma_cinfo->edma_percpu_info[i].napi); + + edma_irq_disable(edma_cinfo); + edma_write_reg(EDMA_REG_RX_ISR, 0xff); + edma_write_reg(EDMA_REG_TX_ISR, 0xffff); +#ifdef CONFIG_RFS_ACCEL + for (i = 0; i < edma_cinfo->num_gmac; i++) { + free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap); + edma_netdev[i]->rx_cpu_rmap = NULL; + } +#endif + + for (i = 0; i < edma_cinfo->num_gmac; i++) { + struct edma_adapter *adapter = netdev_priv(edma_netdev[i]); + + if (adapter->phydev) + phy_disconnect(adapter->phydev); + } + + del_timer_sync(&edma_cinfo->edma_stats_timer); + edma_free_irqs(adapter); + unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr); + edma_free_tx_resources(edma_cinfo); + edma_free_rx_resources(edma_cinfo); + edma_free_tx_rings(edma_cinfo); + edma_free_rx_rings(edma_cinfo); + edma_free_queues(edma_cinfo); + for (i = 0; i < edma_cinfo->num_gmac; i++) + free_netdev(edma_netdev[i]); + + kfree(edma_cinfo); + + return 0; +} + +static const struct of_device_id edma_of_mtable[] = { + {.compatible = "qcom,ess-edma" }, + {} +}; +MODULE_DEVICE_TABLE(of, edma_of_mtable); + +static struct platform_driver edma_axi_driver = { + .driver = { + .name = edma_axi_driver_name, + .of_match_table = edma_of_mtable, + }, + .probe = edma_axi_probe, + .remove = edma_axi_remove, +}; + +module_platform_driver(edma_axi_driver); + +MODULE_AUTHOR("Qualcomm Atheros Inc"); +MODULE_DESCRIPTION("QCA ESS EDMA driver"); +MODULE_LICENSE("GPL"); --- /dev/null +++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "edma.h" + +struct edma_ethtool_stats { + uint8_t stat_string[ETH_GSTRING_LEN]; + uint32_t stat_offset; +}; + +#define EDMA_STAT(m) offsetof(struct edma_ethtool_statistics, m) +#define DRVINFO_LEN 32 + +/* Array of strings describing statistics + */ +static const struct edma_ethtool_stats edma_gstrings_stats[] = { + {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)}, + {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)}, + {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)}, + {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)}, + {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)}, + {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)}, + {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)}, + {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)}, + {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)}, + {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)}, + {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)}, + {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)}, + {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)}, + {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)}, + {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)}, + {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)}, + {"tx_q0_byte", EDMA_STAT(tx_q0_byte)}, + {"tx_q1_byte", EDMA_STAT(tx_q1_byte)}, + {"tx_q2_byte", EDMA_STAT(tx_q2_byte)}, + {"tx_q3_byte", EDMA_STAT(tx_q3_byte)}, + {"tx_q4_byte", EDMA_STAT(tx_q4_byte)}, + {"tx_q5_byte", EDMA_STAT(tx_q5_byte)}, + {"tx_q6_byte", EDMA_STAT(tx_q6_byte)}, + {"tx_q7_byte", EDMA_STAT(tx_q7_byte)}, + {"tx_q8_byte", EDMA_STAT(tx_q8_byte)}, + {"tx_q9_byte", EDMA_STAT(tx_q9_byte)}, + {"tx_q10_byte", EDMA_STAT(tx_q10_byte)}, + {"tx_q11_byte", EDMA_STAT(tx_q11_byte)}, + {"tx_q12_byte", EDMA_STAT(tx_q12_byte)}, + {"tx_q13_byte", EDMA_STAT(tx_q13_byte)}, + {"tx_q14_byte", EDMA_STAT(tx_q14_byte)}, + {"tx_q15_byte", EDMA_STAT(tx_q15_byte)}, + {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)}, + {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)}, + {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)}, + {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)}, + {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)}, + {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)}, + {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)}, + {"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)}, + {"rx_q0_byte", EDMA_STAT(rx_q0_byte)}, + {"rx_q1_byte", EDMA_STAT(rx_q1_byte)}, + {"rx_q2_byte", EDMA_STAT(rx_q2_byte)}, + {"rx_q3_byte", EDMA_STAT(rx_q3_byte)}, + {"rx_q4_byte", EDMA_STAT(rx_q4_byte)}, + {"rx_q5_byte", EDMA_STAT(rx_q5_byte)}, + {"rx_q6_byte", EDMA_STAT(rx_q6_byte)}, + {"rx_q7_byte", EDMA_STAT(rx_q7_byte)}, + {"tx_desc_error", EDMA_STAT(tx_desc_error)}, +}; + +#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats) + +/* edma_get_strset_count() + * Get strset count + */ +static int edma_get_strset_count(struct net_device *netdev, + int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return EDMA_STATS_LEN; + default: + netdev_dbg(netdev, "%s: Invalid string set", __func__); + return -EOPNOTSUPP; + } +} + + +/* edma_get_strings() + * get stats string + */ +static void edma_get_strings(struct net_device *netdev, uint32_t stringset, + uint8_t *data) +{ + uint8_t *p = data; + uint32_t i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < EDMA_STATS_LEN; i++) { + memcpy(p, edma_gstrings_stats[i].stat_string, + min((size_t)ETH_GSTRING_LEN, + strlen(edma_gstrings_stats[i].stat_string) + + 1)); + p += ETH_GSTRING_LEN; + } + break; + } +} + +/* edma_get_ethtool_stats() + * Get ethtool statistics + */ +static void edma_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + int i; + uint8_t *p = NULL; + + edma_read_append_stats(edma_cinfo); + + for(i = 0; i < EDMA_STATS_LEN; 
+
+/* edma_get_strset_count()
+ * Get strset count
+ */
+static int edma_get_strset_count(struct net_device *netdev,
+ int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return EDMA_STATS_LEN;
+ default:
+ netdev_dbg(netdev, "%s: Invalid string set", __func__);
+ return -EOPNOTSUPP;
+ }
+}
+
+/* edma_get_strings()
+ * get stats string
+ */
+static void edma_get_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
+{
+ uint8_t *p = data;
+ uint32_t i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < EDMA_STATS_LEN; i++) {
+ memcpy(p, edma_gstrings_stats[i].stat_string,
+ min((size_t)ETH_GSTRING_LEN,
+ strlen(edma_gstrings_stats[i].stat_string) + 1));
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+/* edma_get_ethtool_stats()
+ * Get ethtool statistics
+ */
+static void edma_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
+ int i;
+ uint8_t *p = NULL;
+
+ edma_read_append_stats(edma_cinfo);
+
+ for (i = 0; i < EDMA_STATS_LEN; i++) {
+ p = (uint8_t *)&(edma_cinfo->edma_ethstats) +
+ edma_gstrings_stats[i].stat_offset;
+ data[i] = *(uint32_t *)p;
+ }
+}
+
+/* edma_get_drvinfo()
+ * get edma driver info
+ */
+static void edma_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "ess_edma", DRVINFO_LEN);
+ strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
+}
+
+/* edma_nway_reset()
+ * Restarting autonegotiation is not supported by this driver.
+ */
+static int edma_nway_reset(struct net_device *netdev)
+{
+ return -EINVAL;
+}
+
+/* edma_get_wol()
+ * get wake on lan info
+ */
+static void edma_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+}
+
+/* edma_get_msglevel()
+ * get message level.
+ */
+static uint32_t edma_get_msglevel(struct net_device *netdev)
+{
+ return 0;
+}
+
+/* edma_get_settings()
+ * Get edma settings
+ */
+static int edma_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->poll_required) {
+ struct phy_device *phydev = NULL;
+ uint16_t phyreg;
+
+ phydev = adapter->phydev;
+
+ ecmd->advertising = phydev->advertising;
+ ecmd->autoneg = phydev->autoneg;
+
+ if (adapter->link_state == __EDMA_LINKDOWN) {
+ ecmd->speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ } else {
+ ecmd->speed = phydev->speed;
+ ecmd->duplex = phydev->duplex;
+ }
+
+ ecmd->phy_address = adapter->phy_mdio_addr;
+
+ phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA);
+ if (phyreg & LPA_10HALF)
+ ecmd->lp_advertising |= ADVERTISED_10baseT_Half;
+
+ if (phyreg & LPA_10FULL)
+ ecmd->lp_advertising |= ADVERTISED_10baseT_Full;
+
+ if (phyreg & LPA_100HALF)
+ ecmd->lp_advertising |= ADVERTISED_100baseT_Half;
+
+ if (phyreg & LPA_100FULL)
+ ecmd->lp_advertising |= ADVERTISED_100baseT_Full;
+
+ phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000);
+ if (phyreg & LPA_1000HALF)
+ ecmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+
+ if (phyreg & LPA_1000FULL)
+ ecmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+ } else {
+ /* If the speed/duplex for this GMAC is forced and we
+ * are not polling for link state changes, return the
+ * values as specified by the platform. This will be true
+ * for GMACs connected to a switch, and for interfaces
+ * that do not use a PHY.
+ */
+ if (adapter->forced_speed != SPEED_UNKNOWN) {
+ /* set speed and duplex */
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ ecmd->duplex = DUPLEX_FULL;
+
+ /* Populate capabilities advertised by self */
+ ecmd->advertising = 0;
+ ecmd->autoneg = 0;
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ } else {
+ /* non link polled and non
+ * forced speed/duplex interface
+ */
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/* edma_set_settings()
+ * Set EDMA settings
+ */
+static int edma_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = NULL;
+
+ if ((adapter->forced_speed != SPEED_UNKNOWN) &&
+ !adapter->poll_required)
+ return -EPERM;
+
+ phydev = adapter->phydev;
+ phydev->advertising = ecmd->advertising;
+ phydev->autoneg = ecmd->autoneg;
+ phydev->speed = ethtool_cmd_speed(ecmd);
+ phydev->duplex = ecmd->duplex;
+
+ genphy_config_aneg(phydev);
+
+ return 0;
+}
+
+/* edma_get_coalesce
+ * get interrupt mitigation
+ */
+static int edma_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ u32 reg_val;
+
+ edma_get_tx_rx_coalesce(&reg_val);
+
+ /* We read the Interrupt Moderation Timer(IMT) register value,
+ * use lower 16 bit for rx and higher 16 bit for Tx. We do a
+ * left shift by 1, because IMT resolution timer is 2usecs.
+ * Hence the value given by the register is multiplied by 2 to
+ * get the actual time in usecs.
+ */
+ ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1);
+ ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1);
+
+ return 0;
+}
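+
+/* Worked example for the IMT conversion above: a register value of
+ * 0x00200010 carries a Tx field of 0x0020 and an Rx field of 0x0010,
+ * which report as tx_coalesce_usecs = 0x20 << 1 = 64 and
+ * rx_coalesce_usecs = 0x10 << 1 = 32.
+ */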
+
+/* edma_set_coalesce
+ * set interrupt mitigation
+ */
+static int edma_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ if (ec->tx_coalesce_usecs)
+ edma_change_tx_coalesce(ec->tx_coalesce_usecs);
+ if (ec->rx_coalesce_usecs)
+ edma_change_rx_coalesce(ec->rx_coalesce_usecs);
+
+ return 0;
+}
+
+/* edma_set_priv_flags()
+ * Set EDMA private flags
+ */
+static int edma_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ return 0;
+}
+
+/* edma_get_priv_flags()
+ * get edma driver flags
+ */
+static u32 edma_get_priv_flags(struct net_device *netdev)
+{
+ return 0;
+}
+
+/* edma_get_ringparam()
+ * get ring size
+ */
+static void edma_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
+
+ ring->tx_max_pending = edma_cinfo->tx_ring_count;
+ ring->rx_max_pending = edma_cinfo->rx_ring_count;
+}
+
+/* Ethtool operations
+ */
+static const struct ethtool_ops edma_ethtool_ops = {
+ .get_drvinfo = &edma_get_drvinfo,
+ .get_link = &ethtool_op_get_link,
+ .get_msglevel = &edma_get_msglevel,
+ .nway_reset = &edma_nway_reset,
+ .get_wol = &edma_get_wol,
+ .get_settings = &edma_get_settings,
+ .set_settings = &edma_set_settings,
+ .get_strings = &edma_get_strings,
+ .get_sset_count = &edma_get_strset_count,
+ .get_ethtool_stats = &edma_get_ethtool_stats,
+ .get_coalesce = &edma_get_coalesce,
+ .set_coalesce = &edma_set_coalesce,
+ .get_priv_flags = edma_get_priv_flags,
+ .set_priv_flags = edma_set_priv_flags,
+ .get_ringparam = edma_get_ringparam,
+};
+
+/* edma_set_ethtool_ops
+ * Set ethtool operations
+ */
+void edma_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &edma_ethtool_ops;
+}
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ESS_EDMA_H_
+#define _ESS_EDMA_H_
+
+#include <linux/types.h>
+
+struct edma_adapter;
+struct edma_hw;
+
+/* register definition */
+#define EDMA_REG_MAS_CTRL 0x0
+#define EDMA_REG_TIMEOUT_CTRL 0x004
+#define EDMA_REG_DBG0 0x008
+#define EDMA_REG_DBG1 0x00C
+#define EDMA_REG_SW_CTRL0 0x100
+#define EDMA_REG_SW_CTRL1 0x104
+
+/* Interrupt Status Register */
+#define EDMA_REG_RX_ISR 0x200
+#define EDMA_REG_TX_ISR 0x208
+#define EDMA_REG_MISC_ISR 0x210
+#define EDMA_REG_WOL_ISR 0x218
+
+#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << (x))
+
+#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
+#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
+#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
+#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
+#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
+
+#define EDMA_WOL_ISR 0x00000001
+
+/* Interrupt Mask Register */
+#define EDMA_REG_MISC_IMR 0x214
+#define EDMA_REG_WOL_IMR 0x218
+
+#define EDMA_RX_IMR_NORMAL_MASK 0x1
+#define EDMA_TX_IMR_NORMAL_MASK 0x1
+#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
+#define EDMA_WOL_IMR_NORMAL_MASK 0x1
+
+/* Edma receive consumer index */
+#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
+/* Edma transmit consumer index */
+#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
+
+/* IRQ Moderator Initial Timer Register */
+#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
+#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
+#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
+#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
+
+/* Interrupt Control Register */
+#define EDMA_REG_INTR_CTRL 0x284
+#define EDMA_INTR_CLR_TYP_SHIFT 0
+#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
+#define EDMA_INTR_CLEAR_TYPE_W1 0
+#define EDMA_INTR_CLEAR_TYPE_R 1
+
+/* RX Interrupt Mask Register */
+#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
+
+/* TX Interrupt mask register */
+#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
+
+/* Load Ptr Register
+ * Software sets this bit after the initialization of the head and tail
+ */
+#define EDMA_REG_TX_SRAM_PART 0x400
+#define EDMA_LOAD_PTR_SHIFT 16
+
+/* TXQ Control Register */
+#define EDMA_REG_TXQ_CTRL 0x404
+#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
+#define EDMA_TXQ_CTRL_TXQ_EN 0x20
+#define EDMA_TXQ_CTRL_ENH_MODE 0x40
+#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
+#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
+#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
+#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
+#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
+#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
+#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
+
+#define EDMA_REG_TXF_WATER_MARK 0x408 /* In 8-byte units */
+#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
+#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
+#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
+#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
+
+/* WRR Control Register */
+#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
+#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
+#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
+#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
+
+/* Weighted round robin (WRR): given a queue id, compute the bit
+ * position at which that queue's weight starts within its WRR
+ * control register.
+ */
+#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
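+
+/* Worked example: each queue uses a 5-bit weight field and four queues
+ * share one register, so queue 7 lives in EDMA_REG_WRR_CTRL_Q4_Q7 at
+ * bit EDMA_WRR_SHIFT(7) = (7 * 5) % 20 = 15. EDMA_VQ_ID_SHIFT() below
+ * follows the same pattern with 3-bit fields, eight per register.
+ */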
+
+/* Tx Descriptor Control Register */
+#define EDMA_REG_TPD_RING_SIZE 0x41C
+#define EDMA_TPD_RING_SIZE_SHIFT 0
+#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
+
+/* Transmit descriptor base address */
+#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
+
+/* TPD Index Register */
+#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
+
+#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
+#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
+#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
+#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
+#define EDMA_TPD_PROD_IDX_SHIFT 0
+#define EDMA_TPD_CONS_IDX_SHIFT 16
+
+/* TX Virtual Queue Mapping Control Register */
+#define EDMA_REG_VQ_CTRL0 0x4A0
+#define EDMA_REG_VQ_CTRL1 0x4A4
+
+/* Virtual QID shift: given a queue id, compute the bit position of its
+ * virtual QID field within the virtual QID control register.
+ */
+#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
+
+/* Virtual Queue Default Value */
+#define EDMA_VQ_REG_VALUE 0x240240
+
+/* Tx side Port Interface Control Register */
+#define EDMA_REG_PORT_CTRL 0x4A8
+#define EDMA_PAD_EN_SHIFT 15
+
+/* Tx side VLAN Configuration Register */
+#define EDMA_REG_VLAN_CFG 0x4AC
+
+#define EDMA_TX_CVLAN 16
+#define EDMA_TX_INS_CVLAN 17
+#define EDMA_TX_CVLAN_TAG_SHIFT 0
+
+#define EDMA_TX_SVLAN 14
+#define EDMA_TX_INS_SVLAN 15
+#define EDMA_TX_SVLAN_TAG_SHIFT 16
+
+/* Tx Queue Packet Statistic Register */
+#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
+
+#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
+
+/* Tx Queue Byte Statistic Register */
+#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
+
+/* Load Balance Based Ring Offset Register */
+#define EDMA_REG_LB_RING 0x800
+#define EDMA_LB_RING_ENTRY_MASK 0xff
+#define EDMA_LB_RING_ID_MASK 0x7
+#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
+#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
+#define EDMA_LB_RING_ID_OFFSET 0
+#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
+#define EDMA_LB_REG_VALUE 0x6040200
+
+/* Load Balance Priority Mapping Register */
+#define EDMA_REG_LB_PRI_START 0x804
+#define EDMA_REG_LB_PRI_END 0x810
+#define EDMA_LB_PRI_REG_INC 4
+#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
+#define EDMA_LB_PRI_ENTRY_MASK 0xf
+
+/* RSS Priority Mapping Register */
+#define EDMA_REG_RSS_PRI 0x820
+#define EDMA_RSS_PRI_ENTRY_MASK 0xf
+#define EDMA_RSS_RING_ID_MASK 0x7
+#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
+
+/* RSS Indirection Register */
+#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = indirection table number */
+#define EDMA_NUM_IDT 16
+#define EDMA_RSS_IDT_VALUE 0x64206420
+
+/* Default RSS Ring Register */
+#define EDMA_REG_DEF_RSS 0x890
+#define EDMA_DEF_RSS_MASK 0x7
+
+/* RSS Hash Function Type Register */
+#define EDMA_REG_RSS_TYPE 0x894
+#define EDMA_RSS_TYPE_NONE 0x01
+#define EDMA_RSS_TYPE_IPV4TCP 0x02
+#define EDMA_RSS_TYPE_IPV6_TCP 0x04
+#define EDMA_RSS_TYPE_IPV4_UDP 0x08
+#define EDMA_RSS_TYPE_IPV6UDP 0x10
+#define EDMA_RSS_TYPE_IPV4 0x20
+#define EDMA_RSS_TYPE_IPV6 0x40
+#define EDMA_RSS_HASH_MODE_MASK 0x7f
+
+#define EDMA_REG_RSS_HASH_VALUE 0x8C0
+
+#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
+
+#define EDMA_HASH_TYPE_START 0
+#define EDMA_HASH_TYPE_END 5
+#define EDMA_HASH_TYPE_SHIFT 12
+
+#define EDMA_RFS_FLOW_ENTRIES 1024
+#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
+#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
+
+/* RFD Base Address Register */
+#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
+
+/* RFD Index Register */
+#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
+
+#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
+#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
+#define EDMA_RFD_PROD_IDX_MASK 0xFFF
+#define EDMA_RFD_CONS_IDX_MASK 0xFFF
+#define EDMA_RFD_PROD_IDX_SHIFT 0
+#define EDMA_RFD_CONS_IDX_SHIFT 16
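+
+/* Decoding sketch for a producer/consumer index pair, assuming the
+ * driver's edma_read_reg() helper:
+ *
+ *   u32 data = 0;
+ *   u16 prod_idx, cons_idx;
+ *
+ *   edma_read_reg(EDMA_REG_RFD_IDX_Q(0), &data);
+ *   prod_idx = (data >> EDMA_RFD_PROD_IDX_SHIFT) & EDMA_RFD_PROD_IDX_MASK;
+ *   cons_idx = (data >> EDMA_RFD_CONS_IDX_SHIFT) & EDMA_RFD_CONS_IDX_MASK;
+ */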
+
+/* Rx Descriptor Control Register */
+#define EDMA_REG_RX_DESC0 0xA10
+#define EDMA_RFD_RING_SIZE_MASK 0xFFF
+#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
+#define EDMA_RFD_RING_SIZE_SHIFT 0
+#define EDMA_RX_BUF_SIZE_SHIFT 16
+
+#define EDMA_REG_RX_DESC1 0xA14
+#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
+#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
+#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
+#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
+#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
+#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
+
+/* RXQ Control Register */
+#define EDMA_REG_RXQ_CTRL 0xA18
+#define EDMA_FIFO_THRESH_TYPE_SHIF 0
+#define EDMA_FIFO_THRESH_128_BYTE 0x0
+#define EDMA_FIFO_THRESH_64_BYTE 0x1
+#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
+#define EDMA_RXQ_CTRL_EN 0x0000FF00
+
+/* AXI Burst Size Config */
+#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
+#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
+
+/* Rx Statistics Register */
+#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
+#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
+
+/* WoL Pattern Length Register */
+#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
+#define EDMA_WOL_PT_LEN_MASK 0xFF
+#define EDMA_WOL_PT0_LEN_SHIFT 0
+#define EDMA_WOL_PT1_LEN_SHIFT 8
+#define EDMA_WOL_PT2_LEN_SHIFT 16
+#define EDMA_WOL_PT3_LEN_SHIFT 24
+
+#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
+#define EDMA_WOL_PT4_LEN_SHIFT 0
+#define EDMA_WOL_PT5_LEN_SHIFT 8
+#define EDMA_WOL_PT6_LEN_SHIFT 16
+
+/* WoL Control Register */
+#define EDMA_REG_WOL_CTRL 0xC08
+#define EDMA_WOL_WK_EN 0x00000001
+#define EDMA_WOL_MG_EN 0x00000002
+#define EDMA_WOL_PT0_EN 0x00000004
+#define EDMA_WOL_PT1_EN 0x00000008
+#define EDMA_WOL_PT2_EN 0x00000010
+#define EDMA_WOL_PT3_EN 0x00000020
+#define EDMA_WOL_PT4_EN 0x00000040
+#define EDMA_WOL_PT5_EN 0x00000080
+#define EDMA_WOL_PT6_EN 0x00000100
+
+/* MAC Control Register */
+#define EDMA_REG_MAC_CTRL0 0xC20
+#define EDMA_REG_MAC_CTRL1 0xC24
+
+/* WoL Pattern Register */
+#define EDMA_REG_WOL_PATTERN_START 0x5000
+#define EDMA_PATTERN_PART_REG_OFFSET 0x40
+
+/* TX descriptor fields */
+#define EDMA_TPD_HDR_SHIFT 0
+#define EDMA_TPD_PPPOE_EN 0x00000100
+#define EDMA_TPD_IP_CSUM_EN 0x00000200
+#define EDMA_TPD_TCP_CSUM_EN 0x00000400
+#define EDMA_TPD_UDP_CSUM_EN 0x00000800
+#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
+#define EDMA_TPD_LSO_EN 0x00001000
+#define EDMA_TPD_LSO_V2_EN 0x00002000
+#define EDMA_TPD_IPV4_EN 0x00010000
+#define EDMA_TPD_MSS_MASK 0x1FFF
+#define EDMA_TPD_MSS_SHIFT 18
+#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
+
+/* RRD descriptor fields */
+#define EDMA_RRD_NUM_RFD_MASK 0x000F
+#define EDMA_RRD_SVLAN 0x8000
+#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
+
+#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
+#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
+#define EDMA_RRD_CVLAN 0x0001
+#define EDMA_RRD_DESC_VALID 0x8000
+
+#define EDMA_RRD_PRIORITY_SHIFT 4
+#define EDMA_RRD_PRIORITY_MASK 0x7
+#define EDMA_RRD_PORT_TYPE_SHIFT 7
+#define EDMA_RRD_PORT_TYPE_MASK 0x1F
+#endif /* _ESS_EDMA_H_ */