Skip to content

Commit

Permalink
virtqueue: define a common cache flush/invalidate
Browse files Browse the repository at this point in the history
Since all cache operations for vrings, buffers, and the
resource table use metal_cache_flush and
metal_cache_invalidate, define common helper macros for all of them.

Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
  • Loading branch information
iuliana-prodan authored and arnopo committed Sep 18, 2023
1 parent 95a9038 commit b4f10fb
Show file tree
Hide file tree
Showing 4 changed files with 41 additions and 37 deletions.
4 changes: 2 additions & 2 deletions lib/include/openamp/remoteproc_virtio.h
Expand Up @@ -26,8 +26,8 @@ extern "C" {

/* cache invalidation helpers for resource table */
#ifdef VIRTIO_CACHED_RSC_TABLE
#define RSC_TABLE_FLUSH(x, s) metal_cache_flush(x, s)
#define RSC_TABLE_INVALIDATE(x, s) metal_cache_invalidate(x, s)
#define RSC_TABLE_FLUSH(x, s) CACHE_FLUSH(x, s)
#define RSC_TABLE_INVALIDATE(x, s) CACHE_INVALIDATE(x, s)
#else
#define RSC_TABLE_FLUSH(x, s) do { } while (0)
#define RSC_TABLE_INVALIDATE(x, s) do { } while (0)
Expand Down
4 changes: 2 additions & 2 deletions lib/include/openamp/rpmsg_virtio.h
Expand Up @@ -31,8 +31,8 @@ extern "C" {
#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */

#ifdef VIRTIO_CACHED_BUFFERS
#define BUFFER_FLUSH(x, s) metal_cache_flush(x, s)
#define BUFFER_INVALIDATE(x, s) metal_cache_invalidate(x, s)
#define BUFFER_FLUSH(x, s) CACHE_FLUSH(x, s)
#define BUFFER_INVALIDATE(x, s) CACHE_INVALIDATE(x, s)
#else
#define BUFFER_FLUSH(x, s) do { } while (0)
#define BUFFER_INVALIDATE(x, s) do { } while (0)
Expand Down
8 changes: 6 additions & 2 deletions lib/include/openamp/virtqueue.h
Expand Up @@ -48,9 +48,13 @@ extern "C" {
/* Support to suppress interrupt until specific index is reached. */
#define VIRTIO_RING_F_EVENT_IDX (1 << 29)

/* cache invalidation helpers */
#define CACHE_FLUSH(x, s) metal_cache_flush(x, s)
#define CACHE_INVALIDATE(x, s) metal_cache_invalidate(x, s)

#ifdef VIRTIO_CACHED_VRINGS
#define VRING_FLUSH(x, s) metal_cache_flush(&x, s)
#define VRING_INVALIDATE(x, s) metal_cache_invalidate(&x, s)
#define VRING_FLUSH(x, s) CACHE_FLUSH(x, s)
#define VRING_INVALIDATE(x, s) CACHE_INVALIDATE(x, s)
#else
#define VRING_FLUSH(x, s) do { } while (0)
#define VRING_INVALIDATE(x, s) do { } while (0)
Expand Down
62 changes: 31 additions & 31 deletions lib/virtio/virtqueue.c
Expand Up @@ -143,7 +143,7 @@ void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
uint16_t used_idx, desc_idx;

/* Used.idx is updated by the virtio device, so we need to invalidate */
VRING_INVALIDATE(vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));
VRING_INVALIDATE(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));

if (!vq || vq->vq_used_cons_idx == vq->vq_ring.used->idx)
return NULL;
Expand All @@ -156,7 +156,7 @@ void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)
atomic_thread_fence(memory_order_seq_cst);

/* Used.ring is written by remote, invalidate it */
VRING_INVALIDATE(vq->vq_ring.used->ring[used_idx],
VRING_INVALIDATE(&vq->vq_ring.used->ring[used_idx],
sizeof(vq->vq_ring.used->ring[used_idx]));

desc_idx = (uint16_t)uep->id;
Expand All @@ -177,14 +177,14 @@ void *virtqueue_get_buffer(struct virtqueue *vq, uint32_t *len, uint16_t *idx)

uint32_t virtqueue_get_buffer_length(struct virtqueue *vq, uint16_t idx)
{
VRING_INVALIDATE(vq->vq_ring.desc[idx].len,
VRING_INVALIDATE(&vq->vq_ring.desc[idx].len,
sizeof(vq->vq_ring.desc[idx].len));
return vq->vq_ring.desc[idx].len;
}

void *virtqueue_get_buffer_addr(struct virtqueue *vq, uint16_t idx)
{
VRING_INVALIDATE(vq->vq_ring.desc[idx].addr,
VRING_INVALIDATE(&vq->vq_ring.desc[idx].addr,
sizeof(vq->vq_ring.desc[idx].addr));
return virtqueue_phys_to_virt(vq, vq->vq_ring.desc[idx].addr);
}
Expand All @@ -211,7 +211,7 @@ void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
atomic_thread_fence(memory_order_seq_cst);

/* Avail.idx is updated by driver, invalidate it */
VRING_INVALIDATE(vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));
VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));
if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
return NULL;
}
Expand All @@ -221,12 +221,12 @@ void *virtqueue_get_available_buffer(struct virtqueue *vq, uint16_t *avail_idx,
head_idx = vq->vq_available_idx++ & (vq->vq_nentries - 1);

/* Avail.ring is updated by driver, invalidate it */
VRING_INVALIDATE(vq->vq_ring.avail->ring[head_idx],
VRING_INVALIDATE(&vq->vq_ring.avail->ring[head_idx],
sizeof(vq->vq_ring.avail->ring[head_idx]));
*avail_idx = vq->vq_ring.avail->ring[head_idx];

/* Invalidate the desc entry written by driver before accessing it */
VRING_INVALIDATE(vq->vq_ring.desc[*avail_idx],
VRING_INVALIDATE(&vq->vq_ring.desc[*avail_idx],
sizeof(vq->vq_ring.desc[*avail_idx]));
buffer = virtqueue_phys_to_virt(vq, vq->vq_ring.desc[*avail_idx].addr);
*len = vq->vq_ring.desc[*avail_idx].len;
Expand Down Expand Up @@ -255,15 +255,15 @@ int virtqueue_add_consumed_buffer(struct virtqueue *vq, uint16_t head_idx,
used_desc->len = len;

/* We still need to flush it because this is read by driver */
VRING_FLUSH(vq->vq_ring.used->ring[used_idx],
VRING_FLUSH(&vq->vq_ring.used->ring[used_idx],
sizeof(vq->vq_ring.used->ring[used_idx]));

atomic_thread_fence(memory_order_seq_cst);

vq->vq_ring.used->idx++;

/* Used.idx is read by driver, so we need to flush it */
VRING_FLUSH(vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));
VRING_FLUSH(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));

/* Keep pending count until virtqueue_notify(). */
vq->vq_queued_cnt++;
Expand All @@ -287,30 +287,30 @@ void virtqueue_disable_cb(struct virtqueue *vq)
if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) {
vring_used_event(&vq->vq_ring) =
vq->vq_used_cons_idx - vq->vq_nentries - 1;
VRING_FLUSH(vring_used_event(&vq->vq_ring),
VRING_FLUSH(&vring_used_event(&vq->vq_ring),
sizeof(vring_used_event(&vq->vq_ring)));
}
#endif /*VIRTIO_DEVICE_ONLY*/
#ifndef VIRTIO_DRIVER_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) {
vring_avail_event(&vq->vq_ring) =
vq->vq_available_idx - vq->vq_nentries - 1;
VRING_FLUSH(vring_avail_event(&vq->vq_ring),
VRING_FLUSH(&vring_avail_event(&vq->vq_ring),
sizeof(vring_avail_event(&vq->vq_ring)));
}
#endif /*VIRTIO_DRIVER_ONLY*/
} else {
#ifndef VIRTIO_DEVICE_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) {
vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
VRING_FLUSH(vq->vq_ring.avail->flags,
VRING_FLUSH(&vq->vq_ring.avail->flags,
sizeof(vq->vq_ring.avail->flags));
}
#endif /*VIRTIO_DEVICE_ONLY*/
#ifndef VIRTIO_DRIVER_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) {
vq->vq_ring.used->flags |= VRING_USED_F_NO_NOTIFY;
VRING_FLUSH(vq->vq_ring.used->flags,
VRING_FLUSH(&vq->vq_ring.used->flags,
sizeof(vq->vq_ring.used->flags));
}
#endif /*VIRTIO_DRIVER_ONLY*/
Expand Down Expand Up @@ -339,8 +339,8 @@ void virtqueue_dump(struct virtqueue *vq)
if (!vq)
return;

VRING_INVALIDATE(vq->vq_ring.avail, sizeof(vq->vq_ring.avail));
VRING_INVALIDATE(vq->vq_ring.used, sizeof(vq->vq_ring.used));
VRING_INVALIDATE(&vq->vq_ring.avail, sizeof(vq->vq_ring.avail));
VRING_INVALIDATE(&vq->vq_ring.used, sizeof(vq->vq_ring.used));

metal_log(METAL_LOG_DEBUG,
"VQ: %s - size=%d; free=%d; queued=%d; desc_head_idx=%d; "
Expand All @@ -360,7 +360,7 @@ uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
uint32_t len = 0;

/* Avail.idx is updated by driver, invalidate it */
VRING_INVALIDATE(vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));
VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));

if (vq->vq_available_idx == vq->vq_ring.avail->idx) {
return 0;
Expand All @@ -371,12 +371,12 @@ uint32_t virtqueue_get_desc_size(struct virtqueue *vq)
head_idx = vq->vq_available_idx & (vq->vq_nentries - 1);

/* Avail.ring is updated by driver, invalidate it */
VRING_INVALIDATE(vq->vq_ring.avail->ring[head_idx],
VRING_INVALIDATE(&vq->vq_ring.avail->ring[head_idx],
sizeof(vq->vq_ring.avail->ring[head_idx]));
avail_idx = vq->vq_ring.avail->ring[head_idx];

/* Invalidate the desc entry written by driver before accessing it */
VRING_INVALIDATE(vq->vq_ring.desc[avail_idx].len,
VRING_INVALIDATE(&vq->vq_ring.desc[avail_idx].len,
sizeof(vq->vq_ring.desc[avail_idx].len));

len = vq->vq_ring.desc[avail_idx].len;
Expand Down Expand Up @@ -432,7 +432,7 @@ static uint16_t vq_ring_add_buffer(struct virtqueue *vq,
* Instead of flushing the whole desc region, we flush only the
* single entry hopefully saving some cycles
*/
VRING_FLUSH(desc[idx], sizeof(desc[idx]));
VRING_FLUSH(&desc[idx], sizeof(desc[idx]));

}

Expand Down Expand Up @@ -531,15 +531,15 @@ static void vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
vq->vq_ring.avail->ring[avail_idx] = desc_idx;

/* We still need to flush the ring */
VRING_FLUSH(vq->vq_ring.avail->ring[avail_idx],
VRING_FLUSH(&vq->vq_ring.avail->ring[avail_idx],
sizeof(vq->vq_ring.avail->ring[avail_idx]));

atomic_thread_fence(memory_order_seq_cst);

vq->vq_ring.avail->idx++;

/* And the index */
VRING_FLUSH(vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));
VRING_FLUSH(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));

/* Keep pending count until virtqueue_notify(). */
vq->vq_queued_cnt++;
Expand All @@ -561,30 +561,30 @@ static int vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) {
vring_used_event(&vq->vq_ring) =
vq->vq_used_cons_idx + ndesc;
VRING_FLUSH(vring_used_event(&vq->vq_ring),
VRING_FLUSH(&vring_used_event(&vq->vq_ring),
sizeof(vring_used_event(&vq->vq_ring)));
}
#endif /*VIRTIO_DEVICE_ONLY*/
#ifndef VIRTIO_DRIVER_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) {
vring_avail_event(&vq->vq_ring) =
vq->vq_available_idx + ndesc;
VRING_FLUSH(vring_avail_event(&vq->vq_ring),
VRING_FLUSH(&vring_avail_event(&vq->vq_ring),
sizeof(vring_avail_event(&vq->vq_ring)));
}
#endif /*VIRTIO_DRIVER_ONLY*/
} else {
#ifndef VIRTIO_DEVICE_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) {
vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
VRING_FLUSH(vq->vq_ring.avail->flags,
VRING_FLUSH(&vq->vq_ring.avail->flags,
sizeof(vq->vq_ring.avail->flags));
}
#endif /*VIRTIO_DEVICE_ONLY*/
#ifndef VIRTIO_DRIVER_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) {
vq->vq_ring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
VRING_FLUSH(vq->vq_ring.used->flags,
VRING_FLUSH(&vq->vq_ring.used->flags,
sizeof(vq->vq_ring.used->flags));
}
#endif /*VIRTIO_DRIVER_ONLY*/
Expand Down Expand Up @@ -642,7 +642,7 @@ static int vq_ring_must_notify(struct virtqueue *vq)
/* CACHE: no need to invalidate avail */
new_idx = vq->vq_ring.avail->idx;
prev_idx = new_idx - vq->vq_queued_cnt;
VRING_INVALIDATE(vring_avail_event(&vq->vq_ring),
VRING_INVALIDATE(&vring_avail_event(&vq->vq_ring),
sizeof(vring_avail_event(&vq->vq_ring)));
event_idx = vring_avail_event(&vq->vq_ring);
return vring_need_event(event_idx, new_idx,
Expand All @@ -654,7 +654,7 @@ static int vq_ring_must_notify(struct virtqueue *vq)
/* CACHE: no need to invalidate used */
new_idx = vq->vq_ring.used->idx;
prev_idx = new_idx - vq->vq_queued_cnt;
VRING_INVALIDATE(vring_used_event(&vq->vq_ring),
VRING_INVALIDATE(&vring_used_event(&vq->vq_ring),
sizeof(vring_used_event(&vq->vq_ring)));
event_idx = vring_used_event(&vq->vq_ring);
return vring_need_event(event_idx, new_idx,
Expand All @@ -664,15 +664,15 @@ static int vq_ring_must_notify(struct virtqueue *vq)
} else {
#ifndef VIRTIO_DEVICE_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DRIVER) {
VRING_INVALIDATE(vq->vq_ring.used->flags,
VRING_INVALIDATE(&vq->vq_ring.used->flags,
sizeof(vq->vq_ring.used->flags));
return (vq->vq_ring.used->flags &
VRING_USED_F_NO_NOTIFY) == 0;
}
#endif /*VIRTIO_DEVICE_ONLY*/
#ifndef VIRTIO_DRIVER_ONLY
if (vq->vq_dev->role == VIRTIO_DEV_DEVICE) {
VRING_INVALIDATE(vq->vq_ring.avail->flags,
VRING_INVALIDATE(&vq->vq_ring.avail->flags,
sizeof(vq->vq_ring.avail->flags));
return (vq->vq_ring.avail->flags &
VRING_AVAIL_F_NO_INTERRUPT) == 0;
Expand Down Expand Up @@ -705,7 +705,7 @@ static int virtqueue_nused(struct virtqueue *vq)
uint16_t used_idx, nused;

/* Used is written by remote */
VRING_INVALIDATE(vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));
VRING_INVALIDATE(&vq->vq_ring.used->idx, sizeof(vq->vq_ring.used->idx));
used_idx = vq->vq_ring.used->idx;

nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
Expand All @@ -726,7 +726,7 @@ static int virtqueue_navail(struct virtqueue *vq)
uint16_t avail_idx, navail;

/* Avail is written by driver */
VRING_INVALIDATE(vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));
VRING_INVALIDATE(&vq->vq_ring.avail->idx, sizeof(vq->vq_ring.avail->idx));

avail_idx = vq->vq_ring.avail->idx;

Expand Down

0 comments on commit b4f10fb

Please sign in to comment.