samples: ivshmem_flat: Add ivshmem_flat sample
Add a sample to exercise QEMU ivshmem-flat device.

The sample consists of two VMs engaged in an ivshmem notification ping-pong,
which exercises both the ivshmem interrupt path (MMR registers) and the
shared memory region (shmem).

On start, the first VM will register its ivshmem peer ID in the ivshmem
shared memory and wait to be interrupted by the second VM. The second VM
will also register its ID in the ivshmem shared memory and read the
other VM's ivshmem peer ID, then it will notify/interrupt the first VM
at vector 0. Once the first VM is interrupted, it reads the second VM's
peer ID and notifies/interrupts it in return. Once the second VM is
interrupted, the sequence starts again, so the two VMs engage in a kind
of ivshmem ping-pong.
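
In shared-memory terms, each peer publishes its own ID (read from the
IVPOSITION register) into the shmem region and notifies its counterpart by
writing to the DOORBELL register. Roughly, as a sketch distilled from the
sample code below ('shmem_slot' is just an illustrative name for the peer's
slot in shmem):

    *shmem_slot = *ivposition;                    /* publish own peer ID */
    *doorbell = (other_peer_id << 16) + vector;   /* notify peer at 'vector' */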

Signed-off-by: Gustavo Romero <gustavo.romero@linaro.org>
gromero committed Nov 27, 2023
1 parent fb9a6af commit 73fbd48
Showing 4 changed files with 177 additions and 0 deletions.
8 changes: 8 additions & 0 deletions samples/ivshmem_flat/CMakeLists.txt
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.20.0)

find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(ivshmem_flat)

target_sources(app PRIVATE src/main.c)
1 change: 1 addition & 0 deletions samples/ivshmem_flat/README.md
@@ -0,0 +1 @@
See comments in `src/main.c` for details on how to run this sample.
1 change: 1 addition & 0 deletions samples/ivshmem_flat/prj.conf
@@ -0,0 +1 @@
# nothing here
167 changes: 167 additions & 0 deletions samples/ivshmem_flat/src/main.c
@@ -0,0 +1,167 @@
/*
* Copyright (c) 2023 Linaro.
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/kernel.h>

/* This is a simple sample for testing the ivshmem-flat QEMU device, which is
* a device like the PCI ivshmem one but meant to be used on Cortex-M and
* other MCU machines, which usually don't have a PCI bus available.
*
* The sample/test consists of two VMs, both with the ivshmem-flat device
* configured and both running this Zephyr sample.
*
* On start, the first VM will register its ivshmem peer ID in the ivshmem
* shared memory and wait to be interrupted by the second VM. The second
* VM will also register its ID in the ivshmem shared memory and read
* the other VM's ivshmem peer ID, then it will notify/interrupt the first VM at
* vector 0. Once the first VM is interrupted, it reads the second VM's peer ID
* and notifies/interrupts it in return. Once the second VM is interrupted, the
* sequence starts again, so the two VMs engage in a kind of ivshmem ping-pong.
*
* To run this sample, first start the ivshmem-server, which is found in the
* QEMU tree, usually at build/contrib/ivshmem-server.
*
* Run the ivshmem-server with -F (foreground); it will create a Unix socket
* at /tmp/ivshmem_socket.
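*
* For example (a minimal invocation; only -F is taken from the note above,
* any other server options, e.g. a different socket path, are left at their
* defaults -- see the ivshmem-server usage output for details):
*
* $ ./build/contrib/ivshmem-server -F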
*
* Then compile and run the generated zephyr.elf in two different QEMU instances
* running on the same host where the ivshmem-server is running.
*
* To compile the zephyr.elf using 'west', do:
*
* $ west -v --verbose build -p always -b mps2_an385 ./samples/ivshmem_flat/
*
* The -v/--verbose options increase west's verbosity so you can see where the
* .elf image is generated.
*
* Then run the first VM:
*
* $ ./qemu-system-arm -cpu cortex-m3 -machine mps2-an385 -nographic -net none
* -chardev stdio,id=con,mux=on -serial chardev:con
* -mon chardev=con,mode=readline
* -chardev socket,path=/tmp/ivshmem_socket,id=ivshmem_flat
* -device ivshmem-flat,x-irq-qompath='/machine/armv7m/nvic/unnamed-gpio-in[0]',x-bus-qompath="/sysbus",chardev=ivshmem_flat
* -kernel zephyr.elf
*
* And finally start another VM -- the second one is responsible for initiating
* the ping-pong -- using the very same QEMU command line above.
*
* Output similar to the following should be displayed on the terminal of both
* machines:
*
* *** Booting Zephyr OS build zephyr-v3.3.0-8349-gce314136f6ca ***
* *** Board: mps2_an385
* *** Installing direct IRQ handler for external IRQ0 (Exception #16)...
* *** Enabling IRQ0 in the NVIC logic...
* *** Received IVSHMEM PEER ID: 1
* *** Current peer counter is 0, incrementing it by one.
* *** New *counter_ptr = 1
* *** Waiting for a ping from the other peer...
* *** Got interrupt at vector 0!
* *** Pinging back peer ID 2 at vector 0...
* *** Got interrupt at vector 0!
* *** Pinging back peer ID 2 at vector 0...
* *** Got interrupt at vector 0!
* *** Pinging back peer ID 2 at vector 0...
* [...]
*
*/

#define IRQ 0 /* GPIO Port A */
#define IRQ_PRIO 2 /* Priority */
#define IRQ_FLAGS 0 /* Not used in Arm */

/*
* MMR-related addresses
*/
#define MMR_BASE_ADDRESS 0x400FF000

volatile uint32_t *ivposition = (void *)(MMR_BASE_ADDRESS + 8); /* IVPOSITION */
volatile uint32_t *doorbell = (void *)(MMR_BASE_ADDRESS + 12); /* DOORBELL */
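
/*
* MMR usage in this sample (matching the ivshmem register layout): IVPOSITION
* (read-only) holds the peer ID assigned by the ivshmem-server; writing
* (peer_id << 16) + vector_id to DOORBELL notifies/interrupts 'peer_id' at
* 'vector_id'. The remaining MMRs are not used here.
*/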

/*
* SHMEM-related addresses
*/
#define SHMEM_BASE_ADDRESS 0x40100000

volatile uint32_t *shmem_base_address = (void *)SHMEM_BASE_ADDRESS;
volatile uint32_t *counter_ptr = (void *)(SHMEM_BASE_ADDRESS + 0);

/*
* SHMEM_BASE_ADDRESS + 0: Peer counter (number of peers connected)
* SHMEM_BASE_ADDRESS + 4: ID of peer connected at first
* SHMEM_BASE_ADDRESS + 8: ID of peer connected after
*/
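
/*
* For illustration only: the layout above could equivalently be described by
* the struct below (hypothetical, not used by the code, which sticks to raw
* pointer arithmetic).
*/
struct ivshmem_shmem_layout {
	uint32_t peer_counter;	/* SHMEM_BASE_ADDRESS + 0 */
	uint32_t peer_id[2];	/* SHMEM_BASE_ADDRESS + 4 and + 8 */
};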

volatile int other_peer_id_pos;

__no_optimization void custom_delay(void)
{
	for (int i = 0; i < 1024 * 1024 * 256; i++) {
		/* Empty */
	}
}

ISR_DIRECT_DECLARE(gpio_portA_isr)
{
	uint32_t vector_id = 0; /* Only vector 0 is supported by the ivshmem-flat device */

	printk("*** Got interrupt at vector %d!\n", vector_id);

	/* Interrupt the other peer, using vector 0 as well. */
	uint16_t other_peer_id = *(shmem_base_address + 1 /* skip peer counter */ + other_peer_id_pos);

	custom_delay();
	printk("*** Pinging back peer ID %d at vector %d...\n", other_peer_id, vector_id);
	*doorbell = (other_peer_id << 16) + vector_id;

	return 1;
}

int main(void)
{
	int own_peer_id_pos;

	printk("*** Board: %s\n", CONFIG_BOARD);

	printk("*** Installing direct IRQ handler for external IRQ%d (Exception #16)...\n", IRQ);
	IRQ_DIRECT_CONNECT(IRQ, IRQ_PRIO, gpio_portA_isr, IRQ_FLAGS);

	printk("*** Enabling IRQ%d in the NVIC logic...\n", IRQ);
	irq_enable(IRQ);

	printk("*** Received IVSHMEM PEER ID: %d\n", *ivposition);

	/* Register our own ID in the shmem so the other peer can see it */
	own_peer_id_pos = *counter_ptr; /* Own ID position in shmem is based on the peer counter */
	*(shmem_base_address + 1 /* skip peer counter */ + own_peer_id_pos) = *ivposition;

	printk("*** Current peer counter is %d, incrementing it by one.\n", *counter_ptr);
	*counter_ptr = *counter_ptr + 1;

	/* Ensure at most 2 peers are/were connected to the server so the setup works */
	if (*counter_ptr > 2) {
		printk("*** Sorry, can't start ping-pong: it seems more than 2 peers are/were connected to the server!\n");
		printk("*** Disconnect all peers and restart the server\n");
		while (1) { /* Empty */ }
	}

	/* If 2 peers are connected, notify the other peer using vector 0 */
	printk("*** New *counter_ptr = %d\n", *counter_ptr);
	if (*counter_ptr == 2) {
		uint16_t vector_id = 0;
		uint16_t other_peer_id;

		printk("*** Starting ping-pong, pinging the other peer... :-)\n");
		other_peer_id_pos = 0; /* position 0 => other peer ID; position 1 => own peer ID */
		other_peer_id = *(shmem_base_address + 1 /* skip peer counter */ + other_peer_id_pos);
		*doorbell = (other_peer_id << 16) + vector_id; /* Effectively interrupt the other peer */
	} else { /* If just 1 peer is connected, the other one, once it connects, will start the ping-pong */
		printk("*** Waiting for a ping from the other peer...\n");
		other_peer_id_pos = 1; /* position 0 => own peer ID; position 1 => (future) other peer ID */
		/* Wait for the other peer to connect and interrupt us */
	}

	while (1) { /* Empty */ }

	return 0;
}
