net: txgbe: Allocate Rx and Tx resources
Allocate receive and transmit descriptors for all queues.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Jiawen Wu authored and intel-lab-lkp committed Aug 30, 2022
1 parent 123098a commit c864cd7
Showing 4 changed files with 615 additions and 1 deletion.
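
The header below declares txgbe_setup_rx_resources()/txgbe_setup_tx_resources() and their free counterparts, but the function bodies live in txgbe_main.c, one of the four changed files that is not reproduced in this excerpt. As rough orientation only, here is a minimal sketch of the conventional descriptor-ring allocation such helpers perform in Intel-derived drivers: the per-descriptor metadata array comes from vmalloc() and the descriptor ring itself from dma_alloc_coherent(). The body is an assumption; only the prototype and the struct txgbe_ring field names are taken from the header added in this commit.

/* Sketch only: the real body is in txgbe_main.c (not shown in this excerpt). */
int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	/* per-descriptor software state (skb, DMA handle, length) */
	size = sizeof(struct txgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		return -ENOMEM;

	/* descriptor ring itself, rounded up to a whole page */
	tx_ring->size = tx_ring->count * sizeof(union txgbe_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		vfree(tx_ring->tx_buffer_info);
		tx_ring->tx_buffer_info = NULL;
		return -ENOMEM;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;
}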
64 changes: 64 additions & 0 deletions drivers/net/ethernet/wangxun/txgbe/txgbe.h
@@ -11,9 +11,19 @@
#include "txgbe_type.h"

/* TX/RX descriptor defines */
#define TXGBE_DEFAULT_TXD 512
#define TXGBE_DEFAULT_TX_WORK 256
#define TXGBE_MAX_TXD 8192
#define TXGBE_MIN_TXD 128

#if (PAGE_SIZE < 8192)
#define TXGBE_DEFAULT_RXD 512
#define TXGBE_DEFAULT_RX_WORK 256
#else
#define TXGBE_DEFAULT_RXD 256
#define TXGBE_DEFAULT_RX_WORK 128
#endif

#define TXGBE_MAX_RXD 8192
#define TXGBE_MIN_RXD 128

@@ -33,20 +43,44 @@
*/
#define TXGBE_RX_HDR_SIZE TXGBE_RXBUFFER_256

/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct txgbe_tx_buffer {
union txgbe_tx_desc *next_to_watch;
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
};

struct txgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
dma_addr_t page_dma;
struct page *page;
};

struct txgbe_ring {
struct txgbe_ring *next; /* pointer to next ring in q_vector */
struct txgbe_q_vector *q_vector; /* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
struct device *dev; /* device for DMA mapping */
void *desc; /* descriptor ring memory */
union {
struct txgbe_tx_buffer *tx_buffer_info;
struct txgbe_rx_buffer *rx_buffer_info;
};
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */

u16 count; /* amount of descriptors */

u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx;
u16 next_to_use;
u16 next_to_clean;
u16 rx_buf_len;
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;

@@ -66,6 +100,13 @@ static inline unsigned int txgbe_rx_bufsz(struct txgbe_ring __maybe_unused *ring
#endif
}

static inline unsigned int txgbe_rx_pg_order(struct txgbe_ring __maybe_unused *ring)
{
return 0;
}

#define txgbe_rx_pg_size(_ring) (PAGE_SIZE << txgbe_rx_pg_order(_ring))

struct txgbe_ring_container {
struct txgbe_ring *ring; /* pointer to linked list of rings */
u16 work_limit; /* total work allowed per interrupt */
@@ -175,10 +216,12 @@ struct txgbe_adapter {
/* Tx fast path data */
int num_tx_queues;
u16 tx_itr_setting;
u16 tx_work_limit;

/* Rx fast path data */
int num_rx_queues;
u16 rx_itr_setting;
u16 rx_work_limit;

/* TX */
struct txgbe_ring *tx_ring[TXGBE_MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -246,6 +289,15 @@ enum txgbe_state_t {
__TXGBE_IN_SFP_INIT,
};

struct txgbe_cb {
dma_addr_t dma;
u16 append_cnt; /* number of skb's appended */
bool page_released;
bool dma_released;
};

#define TXGBE_CB(skb) ((struct txgbe_cb *)(skb)->cb)

/* needed by txgbe_main.c */
void txgbe_service_event_schedule(struct txgbe_adapter *adapter);
void txgbe_assign_netdev_ops(struct net_device *netdev);
@@ -259,6 +311,10 @@ void txgbe_down(struct txgbe_adapter *adapter);
void txgbe_reinit_locked(struct txgbe_adapter *adapter);
void txgbe_reset(struct txgbe_adapter *adapter);
void txgbe_disable_device(struct txgbe_adapter *adapter);
int txgbe_setup_rx_resources(struct txgbe_ring *rx_ring);
int txgbe_setup_tx_resources(struct txgbe_ring *tx_ring);
void txgbe_free_rx_resources(struct txgbe_ring *rx_ring);
void txgbe_free_tx_resources(struct txgbe_ring *tx_ring);
void txgbe_configure_rx_ring(struct txgbe_adapter *adapter,
struct txgbe_ring *ring);
void txgbe_configure_tx_ring(struct txgbe_adapter *adapter,
@@ -267,13 +323,21 @@ int txgbe_init_interrupt_scheme(struct txgbe_adapter *adapter);
void txgbe_reset_interrupt_capability(struct txgbe_adapter *adapter);
void txgbe_set_interrupt_capability(struct txgbe_adapter *adapter);
void txgbe_clear_interrupt_scheme(struct txgbe_adapter *adapter);
void txgbe_unmap_and_free_tx_resource(struct txgbe_ring *ring,
struct txgbe_tx_buffer *tx_buffer);
void txgbe_configure_port(struct txgbe_adapter *adapter);
void txgbe_set_rx_mode(struct net_device *netdev);
int txgbe_write_mc_addr_list(struct net_device *netdev);
void txgbe_write_eitr(struct txgbe_q_vector *q_vector);
int txgbe_poll(struct napi_struct *napi, int budget);
void txgbe_disable_rx_queue(struct txgbe_adapter *adapter,
struct txgbe_ring *ring);

static inline struct netdev_queue *txring_txq(const struct txgbe_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

int txgbe_write_uc_addr_list(struct net_device *netdev, int pool);
int txgbe_add_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool);
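
Two of the additions above are small accessors rather than resource allocators: TXGBE_CB() overlays struct txgbe_cb on the 48-byte skb->cb scratch area so the driver can keep a DMA handle and page state with each skb, and txring_txq() resolves the struct netdev_queue that backs a Tx ring so the stack-side queue can be stopped and woken together with the hardware ring. Their call sites are in txgbe_main.c, which is not shown in this excerpt; the helper below is purely hypothetical and only illustrates how the two accessors fit together.

/* hypothetical illustration, not part of the patch */
static void txgbe_stash_dma_and_stop(struct txgbe_ring *tx_ring,
				     struct sk_buff *skb, dma_addr_t dma)
{
	/* per-skb driver state lives in skb->cb via TXGBE_CB() */
	TXGBE_CB(skb)->dma = dma;
	TXGBE_CB(skb)->dma_released = false;

	/* pause the matching stack queue while the hardware ring drains */
	netif_tx_stop_queue(txring_txq(tx_ring));
}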
9 changes: 9 additions & 0 deletions drivers/net/ethernet/wangxun/txgbe/txgbe_lib.c
@@ -166,11 +166,19 @@ static int txgbe_alloc_q_vector(struct txgbe_adapter *adapter,
/* initialize CPU for DCA */
q_vector->cpu = -1;

/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
txgbe_poll, 64);

/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
q_vector->v_idx = v_idx;

/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
q_vector->rx.work_limit = adapter->rx_work_limit;

/* initialize pointer to rings */
ring = q_vector->ring;

@@ -265,6 +273,7 @@ static void txgbe_free_q_vector(struct txgbe_adapter *adapter, int v_idx)
adapter->rx_ring[ring->queue_index] = NULL;

adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
kfree_rcu(q_vector, rcu);
}

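
The q_vector work limits copied in txgbe_alloc_q_vector() come from the new adapter->tx_work_limit/rx_work_limit fields, which are presumably seeded from the TXGBE_DEFAULT_TX_WORK/TXGBE_DEFAULT_RX_WORK defines added to txgbe.h; that initialization would sit in the adapter setup path in txgbe_main.c, not shown in this excerpt. A plausible (assumed) form:

	/* assumed initialization in the adapter setup path (txgbe_main.c, not shown) */
	adapter->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
	adapter->rx_work_limit = TXGBE_DEFAULT_RX_WORK;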
