Commit 4b7fe92

hartkopp authored and marckleinebudde committed
can: isotp: add local echo tx processing for consecutive frames
Instead of dumping the CAN frames into the netdevice queue the process to
transmit consecutive frames (CF) now waits for the frame to be transmitted
and therefore echo'ed from the CAN interface.

Link: https://lore.kernel.org/all/20220309120416.83514-1-socketcan@hartkopp.net
Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
1 parent 3126b73 commit 4b7fe92
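
The changed code path is only exercised for segmented PDUs, i.e. when more payload is written to a CAN_ISOTP socket than fits into a single frame, so that a first frame (FF) followed by consecutive frames (CF) has to be sent. A minimal user-space sketch that triggers this path is shown below; the interface name "can0", the 0x123/0x321 addressing and the 100-byte payload are illustrative values, not part of this commit.

/* sketch: send a PDU large enough to require consecutive frames;
 * "can0" and the 0x123/0x321 addressing are example values only
 */
#include <linux/can.h>
#include <linux/can/isotp.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	unsigned char pdu[100];		/* > 7 bytes => FF + CF segmentation */
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

	if (s < 0)
		return 1;

	addr.can_ifindex = if_nametoindex("can0");
	addr.can_addr.tp.tx_id = 0x123;	/* data frames (and their echo) use this ID */
	addr.can_addr.tp.rx_id = 0x321;	/* flow control (FC) frames arrive here */

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	memset(pdu, 0x42, sizeof(pdu));
	write(s, pdu, sizeof(pdu));	/* segmented tx: FF, then echo-paced CFs */

	close(s);
	return 0;
}

With local echo enabled via can_send(skb, 1), each CF is looped back to the new isotp_rcv_echo() handler, which paces the next CF instead of the former queue-dumping loop in the tx timer.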

File tree

1 file changed (+139, −80 lines)


net/can/isotp.c

Lines changed: 139 additions & 80 deletions
@@ -14,7 +14,6 @@
  * - use CAN_ISOTP_WAIT_TX_DONE flag to block the caller until the PDU is sent
  * - as we have static buffers the check whether the PDU fits into the buffer
  *   is done at FF reception time (no support for sending 'wait frames')
- * - take care of the tx-queue-len as traffic shaping is still on the TODO list
  *
  * Copyright (c) 2020 Volkswagen Group Electronic Research
  *                    All rights reserved.
@@ -143,6 +142,7 @@ struct isotp_sock {
 	struct can_isotp_ll_options ll;
 	u32 force_tx_stmin;
 	u32 force_rx_stmin;
+	u32 cfecho; /* consecutive frame echo tag */
 	struct tpcon rx, tx;
 	struct list_head notifier;
 	wait_queue_head_t wait;
@@ -712,6 +712,63 @@ static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
 		cf->data[0] = so->opt.ext_address;
 }
 
+static void isotp_send_cframe(struct isotp_sock *so)
+{
+	struct sock *sk = &so->sk;
+	struct sk_buff *skb;
+	struct net_device *dev;
+	struct canfd_frame *cf;
+	int can_send_ret;
+	int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
+
+	dev = dev_get_by_index(sock_net(sk), so->ifindex);
+	if (!dev)
+		return;
+
+	skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv), GFP_ATOMIC);
+	if (!skb) {
+		dev_put(dev);
+		return;
+	}
+
+	can_skb_reserve(skb);
+	can_skb_prv(skb)->ifindex = dev->ifindex;
+	can_skb_prv(skb)->skbcnt = 0;
+
+	cf = (struct canfd_frame *)skb->data;
+	skb_put_zero(skb, so->ll.mtu);
+
+	/* create consecutive frame */
+	isotp_fill_dataframe(cf, so, ae, 0);
+
+	/* place consecutive frame N_PCI in appropriate index */
+	cf->data[ae] = N_PCI_CF | so->tx.sn++;
+	so->tx.sn %= 16;
+	so->tx.bs++;
+
+	cf->flags = so->ll.tx_flags;
+
+	skb->dev = dev;
+	can_skb_set_owner(skb, sk);
+
+	/* cfecho should have been zero'ed by init/isotp_rcv_echo() */
+	if (so->cfecho)
+		pr_notice_once("can-isotp: cfecho is %08X != 0\n", so->cfecho);
+
+	/* set consecutive frame echo tag */
+	so->cfecho = *(u32 *)cf->data;
+
+	/* send frame with local echo enabled */
+	can_send_ret = can_send(skb, 1);
+	if (can_send_ret) {
+		pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
+			       __func__, ERR_PTR(can_send_ret));
+		if (can_send_ret == -ENOBUFS)
+			pr_notice_once("can-isotp: tx queue is full\n");
+	}
+	dev_put(dev);
+}
+
 static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
 				int ae)
 {
@@ -748,19 +805,74 @@ static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
 	so->tx.state = ISOTP_WAIT_FIRST_FC;
 }
 
+static void isotp_rcv_echo(struct sk_buff *skb, void *data)
+{
+	struct sock *sk = (struct sock *)data;
+	struct isotp_sock *so = isotp_sk(sk);
+	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+	/* only handle my own local echo skb's */
+	if (skb->sk != sk || so->cfecho != *(u32 *)cf->data)
+		return;
+
+	/* cancel local echo timeout */
+	hrtimer_cancel(&so->txtimer);
+
+	/* local echo skb with consecutive frame has been consumed */
+	so->cfecho = 0;
+
+	if (so->tx.idx >= so->tx.len) {
+		/* we are done */
+		so->tx.state = ISOTP_IDLE;
+		wake_up_interruptible(&so->wait);
+		return;
+	}
+
+	if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
+		/* stop and wait for FC with timeout */
+		so->tx.state = ISOTP_WAIT_FC;
+		hrtimer_start(&so->txtimer, ktime_set(1, 0),
+			      HRTIMER_MODE_REL_SOFT);
+		return;
+	}
+
+	/* no gap between data frames needed => use burst mode */
+	if (!so->tx_gap) {
+		isotp_send_cframe(so);
+		return;
+	}
+
+	/* start timer to send next consecutive frame with correct delay */
+	hrtimer_start(&so->txtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
+}
+
 static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
 {
 	struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
 					     txtimer);
 	struct sock *sk = &so->sk;
-	struct sk_buff *skb;
-	struct net_device *dev;
-	struct canfd_frame *cf;
 	enum hrtimer_restart restart = HRTIMER_NORESTART;
-	int can_send_ret;
-	int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
 
 	switch (so->tx.state) {
+	case ISOTP_SENDING:
+
+		/* cfecho should be consumed by isotp_rcv_echo() here */
+		if (!so->cfecho) {
+			/* start timeout for unlikely lost echo skb */
+			hrtimer_set_expires(&so->txtimer,
+					    ktime_add(ktime_get(),
						      ktime_set(2, 0)));
+			restart = HRTIMER_RESTART;
+
+			/* push out the next consecutive frame */
+			isotp_send_cframe(so);
+			break;
+		}
+
+		/* cfecho has not been cleared in isotp_rcv_echo() */
+		pr_notice_once("can-isotp: cfecho %08X timeout\n", so->cfecho);
+		fallthrough;
+
 	case ISOTP_WAIT_FC:
 	case ISOTP_WAIT_FIRST_FC:
 
@@ -776,78 +888,6 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
 		wake_up_interruptible(&so->wait);
 		break;
 
-	case ISOTP_SENDING:
-
-		/* push out the next segmented pdu */
-		dev = dev_get_by_index(sock_net(sk), so->ifindex);
-		if (!dev)
-			break;
-
-isotp_tx_burst:
-		skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv),
-				GFP_ATOMIC);
-		if (!skb) {
-			dev_put(dev);
-			break;
-		}
-
-		can_skb_reserve(skb);
-		can_skb_prv(skb)->ifindex = dev->ifindex;
-		can_skb_prv(skb)->skbcnt = 0;
-
-		cf = (struct canfd_frame *)skb->data;
-		skb_put_zero(skb, so->ll.mtu);
-
-		/* create consecutive frame */
-		isotp_fill_dataframe(cf, so, ae, 0);
-
-		/* place consecutive frame N_PCI in appropriate index */
-		cf->data[ae] = N_PCI_CF | so->tx.sn++;
-		so->tx.sn %= 16;
-		so->tx.bs++;
-
-		cf->flags = so->ll.tx_flags;
-
-		skb->dev = dev;
-		can_skb_set_owner(skb, sk);
-
-		can_send_ret = can_send(skb, 1);
-		if (can_send_ret) {
-			pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
-				       __func__, ERR_PTR(can_send_ret));
-			if (can_send_ret == -ENOBUFS)
-				pr_notice_once("can-isotp: tx queue is full, increasing txqueuelen may prevent this error\n");
-		}
-		if (so->tx.idx >= so->tx.len) {
-			/* we are done */
-			so->tx.state = ISOTP_IDLE;
-			dev_put(dev);
-			wake_up_interruptible(&so->wait);
-			break;
-		}
-
-		if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
-			/* stop and wait for FC */
-			so->tx.state = ISOTP_WAIT_FC;
-			dev_put(dev);
-			hrtimer_set_expires(&so->txtimer,
-					    ktime_add(ktime_get(),
						      ktime_set(1, 0)));
-			restart = HRTIMER_RESTART;
-			break;
-		}
-
-		/* no gap between data frames needed => use burst mode */
-		if (!so->tx_gap)
-			goto isotp_tx_burst;
-
-		/* start timer to send next data frame with correct delay */
-		dev_put(dev);
-		hrtimer_set_expires(&so->txtimer,
-				    ktime_add(ktime_get(), so->tx_gap));
-		restart = HRTIMER_RESTART;
-		break;
-
 	default:
 		WARN_ON_ONCE(1);
 	}
@@ -1075,6 +1115,9 @@ static int isotp_release(struct socket *sock)
 				can_rx_unregister(net, dev, so->rxid,
 						  SINGLE_MASK(so->rxid),
 						  isotp_rcv, sk);
+				can_rx_unregister(net, dev, so->txid,
+						  SINGLE_MASK(so->txid),
+						  isotp_rcv_echo, sk);
 				dev_put(dev);
 				synchronize_rcu();
 			}
@@ -1161,11 +1204,20 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 
 	ifindex = dev->ifindex;
 
-	if (do_rx_reg)
+	if (do_rx_reg) {
 		can_rx_register(net, dev, addr->can_addr.tp.rx_id,
 				SINGLE_MASK(addr->can_addr.tp.rx_id),
 				isotp_rcv, sk, "isotp", sk);
 
+		/* no consecutive frame echo skb in flight */
+		so->cfecho = 0;
+
+		/* register for echo skb's */
+		can_rx_register(net, dev, addr->can_addr.tp.tx_id,
+				SINGLE_MASK(addr->can_addr.tp.tx_id),
+				isotp_rcv_echo, sk, "isotpe", sk);
+	}
+
 	dev_put(dev);
 
 	if (so->bound && do_rx_reg) {
@@ -1176,6 +1228,9 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 				can_rx_unregister(net, dev, so->rxid,
 						  SINGLE_MASK(so->rxid),
 						  isotp_rcv, sk);
+				can_rx_unregister(net, dev, so->txid,
+						  SINGLE_MASK(so->txid),
+						  isotp_rcv_echo, sk);
 				dev_put(dev);
 			}
 		}
@@ -1381,10 +1436,14 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg,
 	case NETDEV_UNREGISTER:
 		lock_sock(sk);
 		/* remove current filters & unregister */
-		if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST)))
+		if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
 			can_rx_unregister(dev_net(dev), dev, so->rxid,
 					  SINGLE_MASK(so->rxid),
 					  isotp_rcv, sk);
+			can_rx_unregister(dev_net(dev), dev, so->txid,
+					  SINGLE_MASK(so->txid),
+					  isotp_rcv_echo, sk);
+		}
 
 		so->ifindex = 0;
 		so->bound = 0;
