Skip to content

Commit 50ecd93

Browse files
Tomas Winklerwenlingz
authored and committed
dm: mei: implement tx flow
TX from the virtio driver is handled first via the virtio tx notify handler vmei_notify_tx(), and placed into the host client tx circular buffer. The TX thread will then write the data to the device. Tracked-On: #1536 Signed-off-by: Vitaly Lubart <vitaly.lubart@intel.com> Signed-off-by: Tomas Winkler <tomas.winkler@intel.com> Acked-by: Wang, Yu1 <yu1.wang@intel.com>
1 parent 483a893 commit 50ecd93

File tree

1 file changed

+214
-0
lines changed

1 file changed

+214
-0
lines changed

devicemodel/hw/pci/virtio/virtio_mei.c

Lines changed: 214 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1350,6 +1350,11 @@ vmei_hbm_handler(struct virtio_mei *vmei, const void *data)
13501350
}
13511351
}
13521352

1353+
static inline bool hdr_is_hbm(const struct mei_msg_hdr *hdr)
1354+
{
1355+
return hdr->host_addr == 0 && hdr->me_addr == 0;
1356+
}
1357+
13531358
static ssize_t
13541359
vmei_host_client_native_write(struct vmei_host_client *hclient)
13551360
{
@@ -1410,6 +1415,215 @@ vmei_host_client_native_write(struct vmei_host_client *hclient)
14101415
return lencnt;
14111416
}
14121417

1418+
static void
1419+
vmei_proc_tx(struct virtio_mei *vmei, struct virtio_vq_info *vq)
1420+
{
1421+
struct iovec iov[VMEI_TX_SEGS + 1];
1422+
uint16_t idx;
1423+
size_t tlen;
1424+
int n;
1425+
1426+
struct mei_msg_hdr *hdr;
1427+
uint8_t *data;
1428+
uint8_t i_idx;
1429+
struct vmei_circular_iobufs *bufs;
1430+
1431+
struct vmei_host_client *hclient = NULL;
1432+
1433+
/*
1434+
* Obtain chain of descriptors.
1435+
* The first one is hdr, the second is for payload.
1436+
*/
1437+
n = vq_getchain(vq, &idx, iov, VMEI_TX_SEGS, NULL);
1438+
assert(n == 2);
1439+
1440+
hdr = (struct mei_msg_hdr *)iov[0].iov_base;
1441+
data = (uint8_t *)iov[1].iov_base;
1442+
1443+
tlen = iov[0].iov_len + iov[1].iov_len;
1444+
1445+
DPRINTF("TX: UOS->DM, hdr[h=%02d me=%02d comp=%1d] length[%d]\n",
1446+
hdr->host_addr, hdr->me_addr, hdr->msg_complete, hdr->length);
1447+
vmei_dbg_print_hex("TX: UOS->DM", iov[1].iov_base, iov[1].iov_len);
1448+
1449+
if (hdr_is_hbm(hdr)) {
1450+
vmei_hbm_handler(vmei, data);
1451+
goto out;
1452+
}
1453+
/* general client client
1454+
* must be in active_clients list.
1455+
*/
1456+
hclient = vmei_find_host_client(vmei, hdr->me_addr, hdr->host_addr);
1457+
if (!hclient) {
1458+
DPRINTF("TX: ME[%02d:%02d] NOT found!\n",
1459+
hdr->host_addr, hdr->host_addr);
1460+
goto failed;
1461+
}
1462+
1463+
pthread_mutex_lock(&vmei->tx_mutex);
1464+
bufs = &hclient->send_bufs;
1465+
i_idx = bufs->i_idx;
1466+
HCL_DBG(hclient, "TX: client found complete = %d\n",
1467+
bufs->complete[i_idx]);
1468+
/* check for overflow
1469+
* here there are 2 types of possible overflows :
1470+
* (1) no available buffers (all buffers are taken) and
1471+
* (2) no space in the current buffer
1472+
*/
1473+
if ((i_idx + 1) % VMEI_IOBUFS_MAX == bufs->r_idx ||
1474+
(bufs->buf_sz - bufs->bufs[i_idx].iov_len < hdr->length)) {
1475+
HCL_DBG(hclient, "TX: overflow\n");
1476+
/* close the connection according to spec */
1477+
/* FIXME need to remove the clinet */
1478+
vmei_hbm_disconnect_client(hclient);
1479+
pthread_mutex_unlock(&vmei->tx_mutex);
1480+
vmei_host_client_put(hclient);
1481+
goto out;
1482+
}
1483+
/* copy buffer from virtqueue to send_buf */
1484+
memcpy(bufs->bufs[i_idx].iov_base + bufs->bufs[i_idx].iov_len,
1485+
data, hdr->length);
1486+
1487+
bufs->bufs[i_idx].iov_len += hdr->length;
1488+
if (hdr->msg_complete) {
1489+
/* send complete msg to HW */
1490+
HCL_DBG(hclient, "TX: completed, sening msg to FW\n");
1491+
bufs->complete[i_idx] = 1;
1492+
bufs->i_idx++;
1493+
if (bufs->i_idx >= VMEI_IOBUFS_MAX) /* wraparound */
1494+
bufs->i_idx = 0;
1495+
pthread_cond_signal(&vmei->tx_cond);
1496+
}
1497+
pthread_mutex_unlock(&vmei->tx_mutex);
1498+
vmei_host_client_put(hclient);
1499+
out:
1500+
/* chain is processed, release it and set tlen */
1501+
vq_relchain(vq, idx, tlen);
1502+
DPRINTF("TX: release OUT-vq idx[%d]\n", idx);
1503+
1504+
if (vmei->rx_need_sched) {
1505+
pthread_mutex_lock(&vmei->rx_mutex);
1506+
pthread_cond_signal(&vmei->rx_cond);
1507+
pthread_mutex_unlock(&vmei->rx_mutex);
1508+
}
1509+
1510+
return;
1511+
1512+
failed:
1513+
if (vmei->status == VMEI_STS_PENDING_RESET) {
1514+
vmei_virtual_fw_reset(vmei);
1515+
/* Let's wait 100ms for HBM enumeration done */
1516+
usleep(100000);
1517+
virtio_config_changed(&vmei->base);
1518+
}
1519+
/* drop the data */
1520+
vq_relchain(vq, idx, tlen);
1521+
}
1522+
1523+
/*
 * Virtqueue notify callback for the guest OUT (TX) queue.
 *
 * Drains every available descriptor chain through vmei_proc_tx().
 * Guest notifications are suppressed (VRING_USED_F_NO_NOTIFY) for the
 * duration of the drain and re-enabled afterwards; both flag updates
 * are done under tx_mutex so they are ordered against the TX thread.
 */
static void
vmei_notify_tx(void *data, struct virtio_vq_info *vq)
{
	struct virtio_mei *vmei = data;
	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	pthread_mutex_lock(&vmei->tx_mutex);
	DPRINTF("TX: New OUT buffer available!\n");
	/* Suppress further guest kicks while we drain the queue. */
	vq->used->flags |= ACRN_VRING_USED_F_NO_NOTIFY;
	pthread_mutex_unlock(&vmei->tx_mutex);

	/* Consume every available chain; each call releases one chain. */
	while (vq_has_descs(vq))
		vmei_proc_tx(vmei, vq);

	/* Interrupt the guest: used ring was updated (force = 1). */
	vq_endchains(vq, 1);

	pthread_mutex_lock(&vmei->tx_mutex);
	DPRINTF("TX: New OUT buffer available!\n");
	/* Re-enable guest notifications now that the queue is drained. */
	vq->used->flags &= ~ACRN_VRING_USED_F_NO_NOTIFY;
	pthread_mutex_unlock(&vmei->tx_mutex);
}
1548+
1549+
static int
1550+
vmei_host_ready_send_buffers(struct vmei_host_client *hclient)
1551+
{
1552+
struct vmei_circular_iobufs *bufs = &hclient->send_bufs;
1553+
1554+
return bufs->complete[bufs->r_idx];
1555+
}
1556+
1557+
/**
 * TX worker thread: drains completed circular send buffers of every
 * connected host client into the native MEI device.
 *
 * Sleeps on tx_cond until vmei_proc_tx() signals that a complete
 * message is queued; after each pass it either blocks again (nothing
 * pending) or waits with a 2s timeout (messages still pending, e.g.
 * after -EAGAIN) and retries.  Exits when status reaches
 * VMEI_STST_DEINIT.  tx_mutex is held for the whole loop except while
 * blocked in the cond waits.
 */
static void *vmei_tx_thread(void *param)
{
	struct virtio_mei *vmei = param;
	struct timespec max_wait = {0, 0};
	int err;

	pthread_mutex_lock(&vmei->tx_mutex);
	/*
	 * Initial wait for the first TX signal.
	 * NOTE(review): assert + if(err) is redundant, and this first
	 * wait has no predicate loop, so a spurious wakeup starts the
	 * main loop early — harmless, since the loop re-checks state,
	 * but worth confirming.
	 */
	err = pthread_cond_wait(&vmei->tx_cond, &vmei->tx_mutex);
	assert(err == 0);
	if (err)
		goto out;

	while (vmei->status != VMEI_STST_DEINIT) {
		struct vmei_me_client *me;
		struct vmei_host_client *e;
		ssize_t len;
		int pending_cnt = 0;	/* clients with more data queued */
		int send_ready = 0;

		/* Walk every connection of every active ME client. */
		pthread_mutex_lock(&vmei->list_mutex);
		LIST_FOREACH(me, &vmei->active_clients, list) {
			pthread_mutex_lock(&me->list_mutex);
			LIST_FOREACH(e, &me->connections, list) {
				/* skip clients with no complete message */
				if (!vmei_host_ready_send_buffers(e))
					continue;

				len = vmei_host_client_native_write(e);
				/* -EAGAIN is retried on the next pass */
				if (len < 0 && len != -EAGAIN) {
					HCL_WARN(e, "TX:send failed %zd\n",
						 len);
					pthread_mutex_unlock(&me->list_mutex);
					goto unlock;
				}
				if (vmei->status == VMEI_STS_RESET) {
					pthread_mutex_unlock(&me->list_mutex);
					goto unlock;
				}

				/*
				 * Still-ready buffers mean the write did not
				 * drain everything: count as pending.
				 * Otherwise grant the guest fresh flow
				 * control credit.
				 */
				send_ready = vmei_host_ready_send_buffers(e);
				pending_cnt += send_ready;
				if (!send_ready)
					vmei_hbm_flow_ctl_req(e);
			}
			pthread_mutex_unlock(&me->list_mutex);
		}
unlock:
		pthread_mutex_unlock(&vmei->list_mutex);

		if (pending_cnt == 0) {
			/* nothing queued: block until the next TX signal */
			err = pthread_cond_wait(&vmei->tx_cond,
						&vmei->tx_mutex);
		} else {
			/*
			 * Data still pending: retry in at most 2 seconds
			 * (CLOCK_REALTIME absolute deadline, as required
			 * by pthread_cond_timedwait).
			 */
			max_wait.tv_sec = time(NULL) + 2;
			max_wait.tv_nsec = 0;
			err = pthread_cond_timedwait(&vmei->tx_cond,
						     &vmei->tx_mutex,
						     &max_wait);
		}

		if (vmei->status == VMEI_STST_DEINIT)
			goto out;
	}
out:
	pthread_mutex_unlock(&vmei->tx_mutex);
	pthread_exit(NULL);
}
1626+
14131627
/*
14141628
* A completed read guarantees that a client message is completed,
14151629
* transmission of client message is started by a flow control message of HBM

0 commit comments

Comments
 (0)