[ipxe-devel] [PATCH 2/4] [sfc] Hardware and queue handling

Martin Habets mhabets at solarflare.com
Fri Apr 7 09:47:17 UTC 2017


Here is a brief description of these source files:
efx_hunt.c - This handles transmit, receive and event queues. It takes care
	     of packet polling, transmit and reception.
efx_common.c - This provides functionality that is common across all our
	       hardware architectures.
efx_common.h - API for efx_common.c.
efx_hunt.h - API for efx_hunt.c.

Signed-off-by: Martin Habets <mhabets at solarflare.com>
---
 efx_common.c |   98 +++++++++++
 efx_common.h |  232 ++++++++++++++++++++++++++
 efx_hunt.c   |  511 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 efx_hunt.h   |   75 ++++++++
 4 files changed, 916 insertions(+)

diff --git a/src/drivers/net/sfc/efx_common.c b/src/drivers/net/sfc/efx_common.c
new file mode 100644
index 000000000000..e9aaba132fdc
--- /dev/null
+++ b/src/drivers/net/sfc/efx_common.c
@@ -0,0 +1,98 @@
+/**************************************************************************
+ *
+ * Driver datapath common code for Solarflare network cards
+ *
+ * Written by Shradha Shah <sshah at solarflare.com>
+ *
+ * Copyright Fen Systems Ltd. 2005
+ * Copyright Level 5 Networks Inc. 2005
+ * Copyright 2006-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * You can also choose to distribute this program under the terms of
+ * the Unmodified Binary Distribution Licence (as given in the file
+ * COPYING.UBDL), provided that you have satisfied its requirements.
+ *
+ ***************************************************************************/
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <byteswap.h>
+#include <ipxe/io.h>
+#include <ipxe/pci.h>
+#include <ipxe/malloc.h>
+#include <ipxe/iobuf.h>
+#include <ipxe/netdevice.h>
+#include "efx_common.h"
+#include "efx_bitfield.h"
+#include "mc_driver_pcol.h"
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+/*******************************************************************************
+ *
+ *
+ * Low-level hardware access
+ *
+ *
+ ******************************************************************************/
+
+/**
+ * Write a dword to a NIC register
+ *
+ * @v efx		NIC
+ * @v value		Dword value to write
+ * @v reg		Register offset within the memory BAR
+ */
+void
+efx_writel(struct efx_nic *efx, efx_dword_t *value, unsigned int reg)
+{
+	DBGCIO(efx, "Writing partial register %x with " EFX_DWORD_FMT "\n",
+	       reg, EFX_DWORD_VAL(*value));
+	_efx_writel(efx, value->u32[0], reg);
+}
+
+/**
+ * Read a dword from a NIC register
+ *
+ * @v efx		NIC
+ * @v value		Dword value read (filled in)
+ * @v reg		Register offset within the memory BAR
+ */
+void
+efx_readl(struct efx_nic *efx, efx_dword_t *value, unsigned int reg)
+{
+	value->u32[0] = _efx_readl(efx, reg);
+	DBGCIO(efx, "Read from register %x, got " EFX_DWORD_FMT "\n",
+	       reg, EFX_DWORD_VAL(*value));
+}
+
+/*******************************************************************************
+ *
+ *
+ * Initialization and Close
+ *
+ *
+ ******************************************************************************/
+/**
+ * Common probe: map the MMIO BAR and enable PCI access
+ *
+ * @v netdev		Network device
+ * @v revision		Hardware revision (currently EFX_HUNTINGTON only)
+ */
+void efx_probe(struct net_device *netdev, enum efx_revision revision)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	struct pci_device *pci = container_of(netdev->dev,
+					      struct pci_device, dev);
+
+	efx->netdev = netdev;
+	efx->revision = revision;
+
+	/* MMIO bar */
+	efx->mmio_start = pci_bar_start(pci, PCI_BASE_ADDRESS_2);
+	efx->mmio_len = pci_bar_size(pci, PCI_BASE_ADDRESS_2);
+	efx->membase = ioremap(efx->mmio_start, efx->mmio_len);
+
+	DBGCP(efx, "BAR of %lx bytes at phys %lx mapped at %p\n",
+	      efx->mmio_len, efx->mmio_start, efx->membase);
+
+	/* Enable PCI access.
+	 * NOTE(review): this runs after the BAR is read and mapped; most
+	 * iPXE drivers call adjust_pci_device() first — confirm ordering.
+	 */
+	adjust_pci_device(pci);
+}
+
+/**
+ * Common remove: unmap the MMIO BAR mapped by efx_probe()
+ *
+ * @v netdev		Network device
+ */
+void efx_remove(struct net_device *netdev)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+
+	iounmap(efx->membase);
+	efx->membase = NULL;
+}
+
diff --git a/src/drivers/net/sfc/efx_common.h b/src/drivers/net/sfc/efx_common.h
new file mode 100644
index 000000000000..3487966ce249
--- /dev/null
+++ b/src/drivers/net/sfc/efx_common.h
@@ -0,0 +1,232 @@
+/**************************************************************************
+ *
+ * GPL common net driver for Solarflare network cards
+ *
+ * Written by Michael Brown <mbrown at fensystems.co.uk>
+ *
+ * Copyright Fen Systems Ltd. 2005
+ * Copyright Level 5 Networks Inc. 2005
+ * Copyright Solarflare Communications Inc. 2013-2017
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * You can also choose to distribute this program under the terms of
+ * the Unmodified Binary Distribution Licence (as given in the file
+ * COPYING.UBDL), provided that you have satisfied its requirements.
+ *
+ ***************************************************************************/
+#ifndef EFX_COMMON_H
+#define EFX_COMMON_H
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#define __packed    __attribute__((__packed__))
+#define __force     /*nothing*/
+
+typedef uint16_t    __le16;
+typedef uint32_t    __le32;
+typedef uint64_t    __le64;
+
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct{int: -!!(e); }))
+#define BUILD_BUG_ON(e) ((void)BUILD_BUG_ON_ZERO(e))
+
+#include <stdbool.h>
+#include <ipxe/io.h>
+#include <ipxe/netdevice.h>
+#include "efx_bitfield.h"
+#include "mcdi.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/**************************************************************************
+ *
+ * Hardware data structures and sizing
+ *
+ ***************************************************************************/
+typedef efx_qword_t efx_rx_desc_t;
+typedef efx_qword_t efx_tx_desc_t;
+typedef efx_qword_t efx_event_t;
+
+#define EFX_BUF_ALIGN		4096
+#define EFX_RXD_SIZE		512
+#define EFX_RXD_MASK            (EFX_RXD_SIZE - 1)
+#define EFX_TXD_SIZE		512
+#define EFX_TXD_MASK            (EFX_TXD_SIZE - 1)
+#define EFX_EVQ_SIZE		512
+#define EFX_EVQ_MASK            (EFX_EVQ_SIZE - 1)
+
+/* There is space for 512 rx descriptors available. This number can be
+ * anything between 1 and 512 in powers of 2. This value will affect the
+ * network performance. During a test we were able to push 239 descriptors
+ * before we ran out of space.
+ */
+#define EFX_NUM_RX_DESC		64
+#define EFX_NUM_RX_DESC_MASK    (EFX_NUM_RX_DESC - 1)
+
+/* The packet size is usually 1500 bytes hence we choose 1600 as the buf size,
+ * which is (1500+metadata)
+ */
+#define EFX_RX_BUF_SIZE		1600
+
+/* Settings for the state field in efx_nic.
+ */
+#define EFX_STATE_POLLING	1
+
+typedef unsigned long long dma_addr_t;
+
+/** A buffer table allocation backing a tx dma, rx dma or eventq */
+struct efx_special_buffer {
+	dma_addr_t dma_addr;
+	int id;
+};
+
+/** A transmit queue */
+struct efx_tx_queue {
+	/* The hardware ring */
+	efx_tx_desc_t *ring;
+
+	/* The software ring storing io_buffers. */
+	struct io_buffer *buf[EFX_TXD_SIZE];
+
+	/* The buffer table reservation pushed to hardware */
+	struct efx_special_buffer entry;
+
+	/* Software descriptor write ptr */
+	unsigned int write_ptr;
+
+	/* Hardware descriptor read ptr */
+	unsigned int read_ptr;
+};
+
+/** A receive queue */
+struct efx_rx_queue {
+	/* The hardware ring */
+	efx_rx_desc_t *ring;
+
+	/* The software ring storing io_buffers */
+	struct io_buffer *buf[EFX_NUM_RX_DESC];
+
+	/* The buffer table reservation pushed to hardware */
+	struct efx_special_buffer entry;
+
+	/* Descriptor write ptr, into both the hardware and software rings */
+	unsigned int write_ptr;
+
+	/* Hardware completion ptr */
+	unsigned int read_ptr;
+
+	/* The value of RX_CONT in the previous RX event */
+	unsigned int rx_cont_prev;
+};
+
+/** An event queue */
+struct efx_ev_queue {
+	/* The hardware ring to push to hardware.
+	 * Must be the first entry in the structure.
+	 */
+	efx_event_t *ring;
+
+	/* The buffer table reservation pushed to hardware */
+	struct efx_special_buffer entry;
+
+	/* Pointers into the ring */
+	unsigned int read_ptr;
+};
+
+/* Hardware revisions */
+enum efx_revision {
+	EFX_HUNTINGTON,
+};
+
+/** Hardware access */
+struct efx_nic {
+	struct net_device *netdev;
+	enum efx_revision revision;
+	const struct efx_nic_type *type;
+
+	int port;
+	u32 state;
+
+	/** Memory and IO base */
+	void *membase;
+	unsigned long mmio_start;
+	unsigned long mmio_len;
+
+	/* Buffer table allocation head */
+	int buffer_head;
+
+	/* Queues */
+	struct efx_rx_queue rxq;
+	struct efx_tx_queue txq;
+	struct efx_ev_queue evq;
+
+	unsigned int rx_prefix_size;
+
+	/** INT_REG_KER */
+	int int_en;
+	efx_oword_t int_ker __aligned;
+
+	/* Set to true if firmware supports the workaround for bug35388 */
+	bool workaround_35388;
+
+};
+
+
+/** Efx device type definition */
+struct efx_nic_type {
+	int (*mcdi_rpc)(struct efx_nic *efx, unsigned int cmd,
+			const efx_dword_t *inbuf, size_t inlen,
+			efx_dword_t *outbuf, size_t outlen,
+			size_t *outlen_actual, bool quiet);
+};
+
+extern const struct efx_nic_type hunt_nic_type;
+
+#define EFX_MAC_FRAME_LEN(_mtu)					\
+	(((_mtu)						\
+	  + /* EtherII already included */			\
+	  + 4 /* FCS */						\
+	  /* No VLAN supported */				\
+	  + 16 /* bug16772 */					\
+	  + 7) & ~7)
+
+/*******************************************************************************
+ *
+ *
+ * Hardware API
+ *
+ *
+ ******************************************************************************/
+/* Raw 32-bit MMIO write at the given offset within the mapped BAR */
+static inline void _efx_writel(struct efx_nic *efx, uint32_t value,
+			       unsigned int reg)
+{
+	writel(value, efx->membase + reg);
+}
+
+/* Raw 32-bit MMIO read at the given offset within the mapped BAR */
+static inline uint32_t _efx_readl(struct efx_nic *efx, unsigned int reg)
+{
+	return readl(efx->membase + reg);
+}
+
+#define efx_writel_table(efx, value, index, reg)		\
+	efx_writel(efx, value, (reg) + ((index) * reg##_STEP))
+
+#define efx_writel_page(efx, value, index, reg)			\
+	efx_writel(efx, value, (reg) + ((index) * 0x2000))
+
+/* Hardware access */
+extern void efx_writel(struct efx_nic *efx, efx_dword_t *value,
+		       unsigned int reg);
+extern void efx_readl(struct efx_nic *efx, efx_dword_t *value,
+		      unsigned int reg);
+
+/* Initialisation */
+extern void efx_probe(struct net_device *netdev, enum efx_revision rev);
+extern void efx_remove(struct net_device *netdev);
+
+#endif /* EFX_COMMON_H */
diff --git a/src/drivers/net/sfc/efx_hunt.c b/src/drivers/net/sfc/efx_hunt.c
new file mode 100644
index 000000000000..51722dc71941
--- /dev/null
+++ b/src/drivers/net/sfc/efx_hunt.c
@@ -0,0 +1,511 @@
+/**************************************************************************
+ *
+ * Driver datapath for Solarflare network cards
+ *
+ * Written by Shradha Shah <sshah at solarflare.com>
+ *
+ * Copyright 2012-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * You can also choose to distribute this program under the terms of
+ * the Unmodified Binary Distribution Licence (as given in the file
+ * COPYING.UBDL), provided that you have satisfied its requirements.
+ *
+ ***************************************************************************/
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <byteswap.h>
+#include <ipxe/io.h>
+#include <ipxe/pci.h>
+#include <ipxe/malloc.h>
+#include <ipxe/iobuf.h>
+#include <ipxe/netdevice.h>
+#include "efx_hunt.h"
+#include "efx_bitfield.h"
+#include "ef10_regs.h"
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+/**
+ * Free a queue backing buffer
+ *
+ * @v buf		Buffer obtained from efx_hunt_alloc_special_buffer()
+ * @v bytes		Size passed at allocation time
+ */
+void efx_hunt_free_special_buffer(void *buf, int bytes)
+{
+	free_dma(buf, bytes);
+}
+
+/**
+ * Allocate a DMA-capable buffer to back a tx, rx or event queue
+ *
+ * @v bytes		Size of the buffer in bytes
+ * @v entry		Buffer-table entry to record the DMA address in
+ * @ret buffer		Virtual address of the buffer, or NULL on failure
+ *
+ * The region is aligned on a buffer address boundary and will later be
+ * passed to the MC via an MC_CMD_INIT_*Q request to set up the queue.
+ */
+static void *efx_hunt_alloc_special_buffer(int bytes,
+					   struct efx_special_buffer *entry)
+{
+	void *mem;
+
+	mem = malloc_dma(bytes, EFX_BUF_ALIGN);
+	if (mem == NULL)
+		return NULL;
+
+	entry->dma_addr = virt_to_bus(mem);
+	assert((entry->dma_addr & (EFX_BUF_ALIGN - 1)) == 0);
+
+	/* Buffer table entries aren't allocated, so set id to zero */
+	entry->id = 0;
+
+	DBGP("Allocated 0x%x bytes at %p\n", bytes, mem);
+	return mem;
+}
+
+/*******************************************************************************
+ *
+ *
+ * TX
+ *
+ *
+ ******************************************************************************/
+/**
+ * Populate a single transmit descriptor for an I/O buffer
+ *
+ * @v txd		Descriptor to fill in
+ * @v iob		I/O buffer holding the frame to transmit
+ *
+ * One non-continued (CONT=0) descriptor covers the whole frame.
+ */
+static void
+efx_hunt_build_tx_desc(efx_tx_desc_t *txd, struct io_buffer *iob)
+{
+	dma_addr_t dma_addr;
+
+	dma_addr = virt_to_bus(iob->data);
+
+	EFX_POPULATE_QWORD_4(*txd,
+			     ESF_DZ_TX_KER_TYPE, 0,
+			     ESF_DZ_TX_KER_CONT, 0,
+			     ESF_DZ_TX_KER_BYTE_CNT, iob_len(iob),
+			     ESF_DZ_TX_KER_BUF_ADDR, dma_addr);
+}
+
+/**
+ * Ring the transmit doorbell with the current write pointer
+ *
+ * @v efx		NIC
+ */
+static void
+efx_hunt_notify_tx_desc(struct efx_nic *efx)
+{
+	struct efx_tx_queue *txq = &efx->txq;
+	/* Hardware sees the ring-masked index, not the free-running count */
+	int ptr = txq->write_ptr & EFX_TXD_MASK;
+	efx_dword_t reg;
+
+	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, ptr);
+	efx_writel_page(efx, &reg, 0, ER_DZ_TX_DESC_UPD_DWORD);
+}
+
+/**
+ * Transmit a packet
+ *
+ * @v netdev		Network device
+ * @v iob		I/O buffer to transmit; completed asynchronously
+ *			via netdev_tx_complete() once hardware is done
+ * @ret rc		0 on success, -ENOBUFS if the ring is full
+ */
+int
+efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	struct efx_tx_queue *txq = &efx->txq;
+	int fill_level, space;
+	efx_tx_desc_t *txd;
+	int buf_id;
+
+	/* write_ptr/read_ptr are free-running, so their difference is the
+	 * number of descriptors in flight; one slot is kept unused.
+	 */
+	fill_level = txq->write_ptr - txq->read_ptr;
+	space = EFX_TXD_SIZE - fill_level - 1;
+	if (space < 1)
+		return -ENOBUFS;
+
+	/* Save the iobuffer for later completion */
+	buf_id = txq->write_ptr & EFX_TXD_MASK;
+	assert(txq->buf[buf_id] == NULL);
+	txq->buf[buf_id] = iob;
+
+	DBGCIO(efx, "tx_buf[%d] for iob %p data %p len %zd\n",
+	       buf_id, iob, iob->data, iob_len(iob));
+
+	/* Form the descriptor, and push it to hardware */
+	txd = txq->ring + buf_id;
+	efx_hunt_build_tx_desc(txd, iob);
+	++txq->write_ptr;
+	efx_hunt_notify_tx_desc(efx);
+
+	return 0;
+}
+
+/**
+ * Complete all transmitted buffers up to and including descriptor @c id
+ *
+ * @v efx		NIC
+ * @v id		Descriptor index reported by the TX completion event
+ */
+static void
+efx_hunt_transmit_done(struct efx_nic *efx, int id)
+{
+	struct efx_tx_queue *txq = &efx->txq;
+	unsigned int read_ptr, stop;
+
+	/* Complete all buffers from read_ptr up to and including id */
+	read_ptr = txq->read_ptr & EFX_TXD_MASK;
+	stop = (id + 1) & EFX_TXD_MASK;
+
+	while (read_ptr != stop) {
+		struct io_buffer *iob = txq->buf[read_ptr];
+
+		/* A populated slot is expected here; the runtime guard
+		 * below covers non-debug builds where assert() is a no-op.
+		 */
+		assert(iob);
+		/* Complete the tx buffer */
+		if (iob)
+			netdev_tx_complete(efx->netdev, iob);
+		DBGCIO(efx, "tx_buf[%d] for iob %p done\n", read_ptr, iob);
+		txq->buf[read_ptr] = NULL;
+
+		++txq->read_ptr;
+		read_ptr = txq->read_ptr & EFX_TXD_MASK;
+	}
+}
+
+/**
+ * Allocate and initialise the transmit descriptor ring
+ *
+ * @v netdev		Network device
+ * @v dma_addr		Filled in with the ring's DMA address
+ * @ret rc		0 on success, -ENOMEM on allocation failure
+ */
+int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	struct efx_tx_queue *txq = &efx->txq;
+	size_t ring_bytes = sizeof(efx_tx_desc_t) * EFX_TXD_SIZE;
+
+	/* Allocate hardware transmit queue */
+	txq->ring = efx_hunt_alloc_special_buffer(ring_bytes, &txq->entry);
+	if (txq->ring == NULL)
+		return -ENOMEM;
+
+	/* An empty ring has both pointers at slot zero */
+	txq->write_ptr = 0;
+	txq->read_ptr = 0;
+
+	*dma_addr = txq->entry.dma_addr;
+	return 0;
+}
+
+/*******************************************************************************
+ *
+ *
+ * RX
+ *
+ *
+ ******************************************************************************/
+/**
+ * Populate a single receive descriptor for an I/O buffer
+ *
+ * @v rxd		Descriptor to fill in
+ * @v iob		I/O buffer of at least EFX_RX_BUF_SIZE bytes
+ */
+static void
+efx_hunt_build_rx_desc(efx_rx_desc_t *rxd, struct io_buffer *iob)
+{
+	dma_addr_t dma_addr = virt_to_bus(iob->data);
+
+	EFX_POPULATE_QWORD_2(*rxd,
+			     ESF_DZ_RX_KER_BYTE_CNT, EFX_RX_BUF_SIZE,
+			     ESF_DZ_RX_KER_BUF_ADDR, dma_addr);
+}
+
+/**
+ * Ring the receive doorbell with the current write pointer
+ *
+ * @v efx		NIC
+ */
+static void
+efx_hunt_notify_rx_desc(struct efx_nic *efx)
+{
+	struct efx_rx_queue *rxq = &efx->rxq;
+	/* Hardware sees the ring-masked index, not the free-running count */
+	int ptr = rxq->write_ptr & EFX_RXD_MASK;
+	efx_dword_t reg;
+
+	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, ptr);
+	efx_writel_page(efx, &reg, 0, ER_DZ_RX_DESC_UPD);
+}
+
+/**
+ * Refill the receive ring with freshly-allocated I/O buffers
+ *
+ * @v efx		NIC
+ *
+ * Pushes buffers until the software ring is full (one slot kept free)
+ * or alloc_iob() fails, then notifies hardware once for the whole batch.
+ */
+static void
+efx_hunt_rxq_fill(struct efx_nic *efx)
+{
+	struct efx_rx_queue *rxq = &efx->rxq;
+	int fill_level = rxq->write_ptr - rxq->read_ptr;
+	int space = EFX_NUM_RX_DESC - fill_level - 1;
+	int pushed = 0;
+
+	while (space) {
+		/* The software ring (EFX_NUM_RX_DESC entries) is smaller
+		 * than the hardware ring (EFX_RXD_SIZE entries), so the
+		 * buffer index and descriptor index are masked separately.
+		 */
+		int buf_id = rxq->write_ptr & (EFX_NUM_RX_DESC - 1);
+		int desc_id = rxq->write_ptr & EFX_RXD_MASK;
+		struct io_buffer *iob;
+		efx_rx_desc_t *rxd;
+
+		assert(rxq->buf[buf_id] == NULL);
+		iob = alloc_iob(EFX_RX_BUF_SIZE);
+		if (!iob)
+			break;
+
+		DBGCP(efx, "pushing rx_buf[%d] iob %p data %p\n",
+		      buf_id, iob, iob->data);
+
+		rxq->buf[buf_id] = iob;
+		rxd = rxq->ring + desc_id;
+		efx_hunt_build_rx_desc(rxd, iob);
+		++rxq->write_ptr;
+		++pushed;
+		--space;
+	}
+
+	/* Push the ptr to hardware */
+	if (pushed > 0) {
+		efx_hunt_notify_rx_desc(efx);
+
+		DBGCP(efx, "pushed %d rx buffers to fill level %d\n",
+		      pushed, rxq->write_ptr - rxq->read_ptr);
+	}
+}
+
+/**
+ * Hand a received packet (or error) to the network stack
+ *
+ * @v efx		NIC
+ * @v id		Low bits of the next descriptor index from the event
+ * @v len		Received length in bytes (0 for an aborted event)
+ * @v drop		Non-zero to drop the packet (scatter or zero length)
+ */
+static void
+efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
+{
+	struct efx_rx_queue *rxq = &efx->rxq;
+	unsigned int read_ptr = rxq->read_ptr & EFX_RXD_MASK;
+	unsigned int buf_ptr = rxq->read_ptr & EFX_NUM_RX_DESC_MASK;
+	struct io_buffer *iob;
+
+	/* id is the lower 4 bits of the desc index + 1 in huntington,
+	 * hence anding with 15.  A zero-length (aborted) event does not
+	 * advance the index.
+	 */
+	assert((id & 15) == ((read_ptr + (len != 0)) & 15));
+
+	/* Pop this rx buffer out of the software ring */
+	iob = rxq->buf[buf_ptr];
+	rxq->buf[buf_ptr] = NULL;
+
+	DBGCIO(efx, "popping rx_buf[%d] iob %p data %p with %d bytes %s %x\n",
+	       read_ptr, iob, iob->data, len, drop ? "bad" : "ok", drop);
+
+	/* Pass the packet up if required */
+	if (drop) {
+		/* iPXE error codes are negative; report -EBADMSG so the
+		 * failure is recorded correctly in the device statistics.
+		 */
+		netdev_rx_err(efx->netdev, iob, -EBADMSG);
+	} else {
+		/* Strip the hardware-prepended RX prefix before handing
+		 * the frame to the network stack.
+		 */
+		iob_put(iob, len);
+		iob_pull(iob, efx->rx_prefix_size);
+		netdev_rx(efx->netdev, iob);
+	}
+
+	++rxq->read_ptr;
+}
+
+/**
+ * Allocate and initialise the receive descriptor ring
+ *
+ * @v netdev		Network device
+ * @v dma_addr		Filled in with the ring's DMA address
+ * @ret rc		0 on success, -ENOMEM on allocation failure
+ */
+int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	struct efx_rx_queue *rxq = &efx->rxq;
+	size_t ring_bytes = sizeof(efx_rx_desc_t) * EFX_RXD_SIZE;
+
+	/* Allocate hardware receive queue */
+	rxq->ring = efx_hunt_alloc_special_buffer(ring_bytes, &rxq->entry);
+	if (!rxq->ring)
+		return -ENOMEM;
+
+	/* An empty ring has both pointers at slot zero */
+	rxq->write_ptr = 0;
+	rxq->read_ptr = 0;
+
+	*dma_addr = rxq->entry.dma_addr;
+	return 0;
+}
+
+/*******************************************************************************
+ *
+ *
+ * Event queues and interrupts
+ *
+ *
+ ******************************************************************************/
+/**
+ * Allocate and initialise the event ring
+ *
+ * @v netdev		Network device
+ * @v dma_addr		Filled in with the ring's DMA address
+ * @ret rc		0 on success, -ENOMEM on allocation failure
+ */
+int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	struct efx_ev_queue *evq = &efx->evq;
+	size_t ring_bytes = sizeof(efx_event_t) * EFX_EVQ_SIZE;
+
+	/* Allocate the hardware event queue */
+	evq->ring = efx_hunt_alloc_special_buffer(ring_bytes, &evq->entry);
+	if (!evq->ring)
+		return -ENOMEM;
+
+	/* All-ones marks an empty slot, so pre-fill the whole ring; see
+	 * efx_hunt_event_present() for the matching check.
+	 */
+	memset(evq->ring, 0xff, ring_bytes);
+
+	evq->read_ptr = 0;
+	*dma_addr = evq->entry.dma_addr;
+	return 0;
+}
+
+/**
+ * Clear any pending interrupts by reading the interrupt status register
+ *
+ * @v efx		NIC
+ */
+static void
+efx_hunt_clear_interrupts(struct efx_nic *efx)
+{
+	efx_dword_t reg;
+	/* read the ISR; the read itself acknowledges pending interrupts */
+	efx_readl(efx, &reg, ER_DZ_BIU_INT_ISR);
+}
+
+/**
+ * See if an event is present
+ *
+ * @v event            EFX event structure
+ * @ret True           An event is pending
+ * @ret False          No event is pending
+ *
+ * We check both the high and low dword of the event for all ones.  We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords.  This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int
+efx_hunt_event_present(efx_event_t *event)
+{
+	/* Deliberate bitwise | (not ||): both operands are 0/1 so the
+	 * result is identical, and the check stays branch-free.
+	 */
+	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
+}
+
+/**
+ * Acknowledge processed events by writing the read pointer to hardware
+ *
+ * @v efx		NIC
+ */
+static void
+efx_hunt_evq_read_ack(struct efx_nic *efx)
+{
+	struct efx_ev_queue *evq = &efx->evq;
+	efx_dword_t reg;
+
+	if (efx->workaround_35388) {
+		/* bug35388 workaround: the read pointer must be written
+		 * via the indirect register, split into a high half and
+		 * a low half of ERF_DD_EVQ_IND_RPTR_WIDTH bits each.
+		 */
+		EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
+				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+				     ERF_DD_EVQ_IND_RPTR,
+				    evq->read_ptr >> ERF_DD_EVQ_IND_RPTR_WIDTH);
+		efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
+		EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
+				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+				     ERF_DD_EVQ_IND_RPTR, evq->read_ptr &
+				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+		efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
+	} else {
+		/* Normal path: single write to the EVQ read-pointer table */
+		EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, evq->read_ptr);
+		efx_writel_table(efx, &reg, 0, ER_DZ_EVQ_RPTR);
+	}
+}
+
+/**
+ * Decode and handle a single event queue entry
+ *
+ * @v efx		NIC
+ * @v evt		Event to handle
+ * @ret count		1 if an RX packet was consumed (counts against the
+ *			poll budget), 0 otherwise
+ */
+static unsigned int
+efx_hunt_handle_event(struct efx_nic *efx, efx_event_t *evt)
+{
+	struct efx_rx_queue *rxq = &efx->rxq;
+	int ev_code, desc_ptr, len;
+	int next_ptr_lbits, packet_drop;
+	int rx_cont;
+
+	/* Decode event */
+	ev_code = EFX_QWORD_FIELD(*evt, ESF_DZ_EV_CODE);
+
+	switch (ev_code) {
+	case ESE_DZ_EV_CODE_TX_EV:
+		desc_ptr = EFX_QWORD_FIELD(*evt, ESF_DZ_TX_DESCR_INDX);
+		efx_hunt_transmit_done(efx, desc_ptr);
+		break;
+
+	case ESE_DZ_EV_CODE_RX_EV:
+		len = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_BYTES);
+		next_ptr_lbits = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_DSC_PTR_LBITS);
+		rx_cont = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_CONT);
+
+		/* We don't expect to receive scattered packets, so drop the
+		 * packet if RX_CONT is set on the current or previous event, or
+		 * if len is zero.  Each reason occupies its own bit so the
+		 * debug output shows which condition triggered the drop.
+		 */
+		packet_drop = (len == 0) | (rx_cont << 1) |
+			      (rxq->rx_cont_prev << 2);
+		efx_hunt_receive(efx, next_ptr_lbits, len, packet_drop);
+		rxq->rx_cont_prev = rx_cont;
+		return 1;
+
+	default:
+		DBGCP(efx, "Unknown event type %d\n", ev_code);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * Poll for completed transmissions and received packets
+ *
+ * @v netdev		Network device
+ *
+ * Processes up to a fixed budget of RX events per call, refills the
+ * receive ring, clears pending interrupts and (if interrupts are
+ * enabled) acknowledges the event queue.
+ */
+void efx_hunt_poll(struct net_device *netdev)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	struct efx_ev_queue *evq = &efx->evq;
+	efx_event_t *evt;
+	int budget = 10;
+
+	/* Read the event queue by directly looking for events
+	 * (we don't even bother to read the eventq write ptr)
+	 */
+	evt = evq->ring + evq->read_ptr;
+	while (efx_hunt_event_present(evt) && (budget > 0)) {
+		DBGCP(efx, "Event at index 0x%x address %p is "
+		      EFX_QWORD_FMT "\n", evq->read_ptr,
+		      evt, EFX_QWORD_VAL(*evt));
+
+		budget -= efx_hunt_handle_event(efx, evt);
+
+		/* Clear the event by writing it back to all-ones, the
+		 * empty-slot marker expected by efx_hunt_event_present()
+		 */
+		EFX_SET_QWORD(*evt);
+
+		/* Move to the next event. We don't ack the event
+		 * queue until the end
+		 */
+		evq->read_ptr = ((evq->read_ptr + 1) & EFX_EVQ_MASK);
+		evt = evq->ring + evq->read_ptr;
+	}
+
+	/* Push more rx buffers if needed */
+	efx_hunt_rxq_fill(efx);
+
+	/* Clear any pending interrupts */
+	efx_hunt_clear_interrupts(efx);
+
+	/* Ack the event queue if interrupts are enabled */
+	if (efx->int_en)
+		efx_hunt_evq_read_ack(efx);
+}
+
+/**
+ * Enable or disable interrupts
+ *
+ * @v netdev		Network device
+ * @v enable		Non-zero to enable interrupts, zero to disable
+ */
+void efx_hunt_irq(struct net_device *netdev, int enable)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+
+	efx->int_en = enable;
+
+	/* If interrupts are enabled, prime the event queue.  Otherwise ack any
+	 * pending interrupts
+	 */
+	if (enable)
+		efx_hunt_evq_read_ack(efx);
+	else if (efx->netdev->state & NETDEV_OPEN)
+		efx_hunt_clear_interrupts(efx);
+}
+
+/*******************************************************************************
+ *
+ *
+ * Initialization and Close
+ *
+ *
+ ******************************************************************************/
+/**
+ * Start the datapath: set timers, ack the event queue, post rx buffers
+ *
+ * @v netdev		Network device
+ * @ret rc		Always 0
+ */
+int efx_hunt_open(struct net_device *netdev)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	efx_dword_t cmd;
+
+	/* Set interrupt moderation to 0 (no event coalescing delay) */
+	EFX_POPULATE_DWORD_2(cmd,
+			     ERF_DZ_TC_TIMER_MODE, 0,
+			     ERF_DZ_TC_TIMER_VAL, 0);
+	efx_writel_page(efx, &cmd, 0, ER_DZ_EVQ_TMR);
+
+	/* Ack the eventq */
+	if (efx->int_en)
+		efx_hunt_evq_read_ack(efx);
+
+	/* Push receive buffers */
+	efx_hunt_rxq_fill(efx);
+
+	return 0;
+}
+
+/**
+ * Stop the datapath: reclaim all outstanding rx and tx buffers
+ *
+ * @v netdev		Network device
+ */
+void efx_hunt_close(struct net_device *netdev)
+{
+	struct efx_nic *efx = netdev_priv(netdev);
+	struct efx_rx_queue *rxq = &efx->rxq;
+	struct efx_tx_queue *txq = &efx->txq;
+	int i;
+
+	/* Free any receive buffers still posted to hardware */
+	for (i = 0; i < EFX_NUM_RX_DESC; i++) {
+		struct io_buffer *iob = rxq->buf[i];
+
+		if (iob != NULL) {
+			free_iob(iob);
+			rxq->buf[i] = NULL;
+		}
+	}
+
+	/* Report any transmissions still in flight as completed */
+	for (i = 0; i < EFX_TXD_SIZE; i++) {
+		struct io_buffer *iob = txq->buf[i];
+
+		if (iob != NULL) {
+			netdev_tx_complete(efx->netdev, iob);
+			txq->buf[i] = NULL;
+		}
+	}
+
+	/* Clear interrupts */
+	efx_hunt_clear_interrupts(efx);
+}
+
diff --git a/src/drivers/net/sfc/efx_hunt.h b/src/drivers/net/sfc/efx_hunt.h
new file mode 100644
index 000000000000..b8377bf20011
--- /dev/null
+++ b/src/drivers/net/sfc/efx_hunt.h
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * GPL net driver for Solarflare network cards
+ *
+ * Written by Shradha Shah <sshah at solarflare.com>
+ *
+ * Copyright 2012-2017 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * You can also choose to distribute this program under the terms of
+ * the Unmodified Binary Distribution Licence (as given in the file
+ * COPYING.UBDL), provided that you have satisfied its requirements.
+ *
+ ***************************************************************************/
+
+#ifndef EFX_HUNT_H
+#define EFX_HUNT_H
+
+#include "efx_common.h"
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+/**************************************************************************
+ *
+ * Hardware data structures and sizing
+ *
+ ***************************************************************************/
+
+#define EFX_EV_SIZE(_nevs)     ((_nevs) * sizeof(efx_qword_t))
+#define EFX_EVQ_NBUFS(_nevs)    (EFX_EV_SIZE(_nevs) / EFX_BUF_ALIGN)
+
+#define	EFX_RXQ_SIZE(_ndescs)	((_ndescs) * sizeof(efx_qword_t))
+#define	EFX_RXQ_NBUFS(_ndescs)	(EFX_RXQ_SIZE(_ndescs) / EFX_BUF_ALIGN)
+
+#define	EFX_TXQ_SIZE(_ndescs)	((_ndescs) * sizeof(efx_qword_t))
+#define	EFX_TXQ_NBUFS(_ndescs)	(EFX_TXQ_SIZE(_ndescs) / EFX_BUF_ALIGN)
+
+/** MCDI request structure */
+struct efx_mcdi_req_s {
+	unsigned int    emr_cmd;
+	efx_dword_t     *emr_in_buf;
+	size_t          emr_in_length;
+	int             emr_rc;
+	efx_dword_t     *emr_out_buf;
+	size_t          emr_out_length;
+	size_t          emr_out_length_used;
+};
+
+/*******************************************************************************
+ *
+ *
+ * Hardware API
+ *
+ *
+ ******************************************************************************/
+
+extern void efx_hunt_free_special_buffer(void *buf, int bytes);
+
+/* Data path entry points */
+extern int efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob);
+extern void efx_hunt_poll(struct net_device *netdev);
+extern void efx_hunt_irq(struct net_device *netdev, int enable);
+
+/* Initialisation */
+extern int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr);
+extern int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr);
+extern int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr);
+extern int efx_hunt_open(struct net_device *netdev);
+extern void efx_hunt_close(struct net_device *netdev);
+
+#endif /* EFX_HUNT_H */



More information about the ipxe-devel mailing list