Skip to content

Commit

Permalink
can: c_can: prepare to up the message objects number
Browse files Browse the repository at this point in the history
As pointed by commit c0a9f4d ("can: c_can: Reduce register access")
the "driver casts the 16 message objects in stone, which is completely
braindead as contemporary hardware has up to 128 message objects".

The patch prepares the module to extend the number of message objects
beyond the 32 currently managed. This was achieved by transforming the
constants used to manage RX/TX messages into variables without changing
the driver policy.

Signed-off-by: Dario Binacchi <dariobin@libero.it>
  • Loading branch information
passgat authored and intel-lab-lkp committed Feb 24, 2021
1 parent a0d9525 commit 9bbfc6b
Show file tree
Hide file tree
Showing 3 changed files with 48 additions and 33 deletions.
56 changes: 37 additions & 19 deletions drivers/net/can/c_can/c_can.c
Expand Up @@ -173,9 +173,6 @@
/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS 1000

/* napi related */
#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM

/* c_can lec values */
enum c_can_lec_type {
LEC_NO_ERROR = 0,
Expand Down Expand Up @@ -325,7 +322,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
* first, i.e. clear the MSGVAL flag in the arbiter.
*/
if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
u32 obj = idx + priv->msg_obj_tx_first;

c_can_inval_msg_object(dev, iface, obj);
change_bit(idx, &priv->tx_dir);
Expand Down Expand Up @@ -463,10 +460,10 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
* prioritized. The lowest buffer number wins.
*/
idx = fls(atomic_read(&priv->tx_active));
obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
obj = idx + priv->msg_obj_tx_first;

/* If this is the last buffer, stop the xmit queue */
if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
if (idx == priv->msg_obj_tx_num - 1)
netif_stop_queue(dev);
/*
* Store the message in the interface so we can call
Expand Down Expand Up @@ -549,17 +546,18 @@ static int c_can_set_bittiming(struct net_device *dev)
*/
/*
 * Configure all hardware message objects for this controller.
 *
 * First invalidate every message object (RX and TX ranges alike) via the
 * RX interface registers, then set up the RX objects: all but the last
 * are plain receive objects; the last one is marked end-of-buffer
 * (IF_MCONT_RCV_EOB) so the FIFO chain terminates there.
 *
 * The object ranges come from the per-device fields (msg_obj_rx_first,
 * msg_obj_rx_last, msg_obj_num) set up at allocation time, rather than
 * compile-time constants, so controllers with more than 32 message
 * objects can be supported.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int i;

	/* first invalidate all message objects */
	for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++)
		c_can_inval_msg_object(dev, IF_RX, i);

	/* setup receive message objects */
	for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++)
		c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);

	c_can_setup_receive_object(dev, IF_RX, priv->msg_obj_rx_last, 0, 0,
				   IF_MCONT_RCV_EOB);
}

Expand Down Expand Up @@ -730,7 +728,7 @@ static void c_can_do_tx(struct net_device *dev)
while ((idx = ffs(pend))) {
idx--;
pend &= ~(1 << idx);
obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
obj = idx + priv->msg_obj_tx_first;
c_can_inval_tx_object(dev, IF_TX, obj);
can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
Expand All @@ -740,7 +738,7 @@ static void c_can_do_tx(struct net_device *dev)
/* Clear the bits in the tx_active mask */
atomic_sub(clr, &priv->tx_active);

if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
if (clr & (1 << (priv->msg_obj_tx_num - 1)))
netif_wake_queue(dev);

if (pkts) {
Expand All @@ -755,11 +753,11 @@ static void c_can_do_tx(struct net_device *dev)
* raced with the hardware or failed to readout all upper
* objects in the last run due to quota limit.
*/
static u32 c_can_adjust_pending(u32 pend)
static u32 c_can_adjust_pending(u32 pend, u32 rx_mask)
{
u32 weight, lasts;

if (pend == RECEIVE_OBJECT_BITS)
if (pend == rx_mask)
return pend;

/*
Expand Down Expand Up @@ -862,8 +860,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
* It is faster to read only one 16bit register. This is only possible
* for a maximum number of 16 objects.
*/
BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
"Implementation does not support more message objects than 16");
WARN_ON(priv->msg_obj_rx_last > 16);

while (quota > 0) {
if (!pend) {
Expand All @@ -874,7 +871,8 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
* If the pending field has a gap, handle the
* bits above the gap first.
*/
toread = c_can_adjust_pending(pend);
toread = c_can_adjust_pending(pend,
priv->msg_obj_rx_mask);
} else {
toread = pend;
}
Expand Down Expand Up @@ -1205,17 +1203,36 @@ static int c_can_close(struct net_device *dev)
return 0;
}

struct net_device *alloc_c_can_dev(void)
struct net_device *alloc_c_can_dev(int msg_obj_num)
{
struct net_device *dev;
struct c_can_priv *priv;
int msg_obj_tx_num = msg_obj_num / 2;

dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
dev = alloc_candev(sizeof(struct c_can_priv), msg_obj_tx_num);
if (!dev)
return NULL;

priv = netdev_priv(dev);
netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
priv->msg_obj_num = msg_obj_num;
priv->msg_obj_rx_num = msg_obj_num - msg_obj_tx_num;
priv->msg_obj_rx_first = 1;
priv->msg_obj_rx_last =
priv->msg_obj_rx_first + priv->msg_obj_rx_num - 1;
priv->msg_obj_rx_mask = ((u64)1 << priv->msg_obj_rx_num) - 1;

priv->msg_obj_tx_num = msg_obj_tx_num;
priv->msg_obj_tx_first = priv->msg_obj_rx_last + 1;
priv->msg_obj_tx_last =
priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1;

priv->dlc = kcalloc(msg_obj_tx_num, sizeof(*priv->dlc), GFP_KERNEL);
if (!priv->dlc) {
free_candev(dev);
return NULL;
}

netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);

priv->dev = dev;
priv->can.bittiming_const = &c_can_bittiming_const;
Expand Down Expand Up @@ -1320,6 +1337,7 @@ void free_c_can_dev(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);

netif_napi_del(&priv->napi);
kfree(priv->dlc);
free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);
Expand Down
23 changes: 10 additions & 13 deletions drivers/net/can/c_can/c_can.h
Expand Up @@ -22,18 +22,7 @@
#ifndef C_CAN_H
#define C_CAN_H

/* message object split */
#define C_CAN_NO_OF_OBJECTS 32
#define C_CAN_MSG_OBJ_RX_NUM 16
#define C_CAN_MSG_OBJ_TX_NUM 16

#define C_CAN_MSG_OBJ_RX_FIRST 1
#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
C_CAN_MSG_OBJ_RX_NUM - 1)

#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)

#define RECEIVE_OBJECT_BITS 0x0000ffff

enum reg {
C_CAN_CTRL_REG = 0,
Expand Down Expand Up @@ -193,6 +182,14 @@ struct c_can_priv {
struct napi_struct napi;
struct net_device *dev;
struct device *device;
int msg_obj_num;
int msg_obj_rx_num;
int msg_obj_tx_num;
int msg_obj_rx_first;
int msg_obj_rx_last;
int msg_obj_tx_first;
int msg_obj_tx_last;
u32 msg_obj_rx_mask;
atomic_t tx_active;
atomic_t sie_pending;
unsigned long tx_dir;
Expand All @@ -209,10 +206,10 @@ struct c_can_priv {
void (*raminit) (const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
u32 rxmasked;
u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
u32 *dlc;
};

struct net_device *alloc_c_can_dev(void);
struct net_device *alloc_c_can_dev(int msg_obj_num);
void free_c_can_dev(struct net_device *dev);
int register_c_can_dev(struct net_device *dev);
void unregister_c_can_dev(struct net_device *dev);
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/can/c_can/c_can_platform.c
Expand Up @@ -293,7 +293,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
}

/* allocate the c_can device */
dev = alloc_c_can_dev();
dev = alloc_c_can_dev(C_CAN_NO_OF_OBJECTS);
if (!dev) {
ret = -ENOMEM;
goto exit;
Expand Down

0 comments on commit 9bbfc6b

Please sign in to comment.