HVM-750 guest virtio drivers are racy with respect to interrupts

HVM-751 CONFIGURE_ONLY check in build.sh is wrong
HVM-752 Import qemu-kvm.git barrier changes
commit 6d85df9c5991c26ead6195ef6eed31e604b14db5 1 parent a28b557
Robert Mustacchi (rmustacc) authored
4 build.sh
@@ -75,9 +75,9 @@ KERNEL_SOURCE="${KERNEL_SOURCE:-$(pwd)/../../illumos}"
75 75 CTFBINDIR="$KERNEL_SOURCE"/usr/src/tools/proto/root_i386-nd/opt/onbld/bin/i386
76 76 export PATH="$PATH:$CTFBINDIR"
77 77
78   -if [[ -z "CONFIGURE_ONLY" ]]; then
  78 +if [[ -z "$CONFIGURE_ONLY" ]]; then
79 79 echo "==> Make"
80   - gmake
  80 + V=1 gmake
81 81 else
82 82 echo "Not running make per-request"
83 83 fi
176 hw/virtio-net.c
@@ -26,6 +26,89 @@
26 26 #define MAC_TABLE_ENTRIES 64
27 27 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
28 28
  29 +/*
  30 + * Unfortunately some guest virtio drivers are a little racy with respect to
  31 + * when they notify us and when they unmask their respective interrupts.
  32 + * Currently we have to work around this in QEMU. While OSes normally work
  33 + * around pathological devices, here the virtual device has to work around the
  34 + * guest driver. To put this more concretely, a Linux guest will notify the
  35 + * host to do processing work before it unmasks interrupts. Therefore, by the
  36 + * time we get to virtio_notify, interrupts on the available ring won't be
  37 + * unmasked, so we won't inject the interrupt; the guest will instead wait
  38 + * indefinitely for one. This leads to lost data.
  39 + *
  40 + * We need to note whether or not we injected an interrupt during a
  41 + * virtio_notify. If we did not, we check whether either of the following
  42 + * conditions about the ring buffers is true:
  43 + *
  44 + * o The last available index processed equals the used index
  45 + * o The last available index processed does not equal the current
  46 + * available index
  47 + *
  48 + * If either holds, then we set up a small timer that runs for 500 ticks, where
  49 + * each tick is 10ms long. If we reach 500 ticks, then we just give up. This
  50 + * is actually a valid position, because the guest could have transmitted a small
  51 + * number of packets, but not enough to actually cause it to need an injection.
  52 + * If we get notified, i.e. we hit virtio_net_handle_tx_timer, then we stop the
  53 + * timer, because we're about to do processing that may inject an interrupt.
  54 + * Otherwise, on each tick we check two different conditions. The first is to see
  55 + * if the last processed available ring index is not equal to the current
  56 + * available ring index. If that is true, then we effectively call virtqueue_flush
  57 + * as virtio_net_tx_timer would. Second, we check if the last available ring index
  58 + * is equal to the used ring index and interrupts are not masked. If this is the
  59 + * case, then we simply inject the interrupt and continue.
  60 + *
  61 + * This is summarized by the following rough state transition diagram:
  62 + *
  63 + * Otherwise +---+
  64 + * virtqueue_ --+ increment +---* |
  65 + * flush() | tick count \|/ | + avail ring
  66 + * finishes | +-------------+ | | index >
  67 + * without +---*-------------------->| |--+ | last avail
  68 + * injecting| | Timer | | index pro-
  69 + * an intr. | +-----*-------------| Active | | cessed
  70 + * | | | | |-----*-----------+
  71 + * | | | +-------------+ |
  72 + * | | +- 500 ticks | | |
  73 + * | | elapse | *--+ Avail ring |
  74 + * | \|/ | | unmasked |
  75 + * +-------------+ | | |
  76 + * | |<--*-----------+ | +--------+ |
  77 + * | Timer | | | | | |
  78 + * | Inactive | +- virtio_net_ +---->| Inject | |
  79 + * | | handle_tx_ | MSI/x | |
  80 + * +-------------+ timer() runs | | |
  81 + * ^ ^ +--------+ |
  82 + * | | +- always | |
  83 + * | | | | |
  84 + * | +-----------------------*------------+ |
  85 + * | |
  86 + * | +- always +------------------+ |
  87 + * | | | | |
  88 + * +---------------*---------------| Flush Virtqueues |<-----+
  89 + * | |
  90 + * +------------------+
  91 + */
  92 +
  93 +
  94 +#define REINJECT_TICK_RATE (10000000) /* 10ms in ns */
  95 +#define REINJECT_DEADMAN 500 /* 5s in ticks */
  96 +
  97 +typedef enum rein_act {
  98 + REIN_INJECT,
  99 + REIN_DEADMAN,
  100 + REIN_RUN
  101 +} rein_act_t;
  102 +
  103 +#define REIN_RING_MAX 64
  104 +
  105 +typedef struct rein_event {
  106 + rein_act_t re_act;
  107 + hrtime_t re_time;
  108 + uint64_t re_other;
  109 + struct timeval re_tval;
  110 +} rein_event_t;
  111 +
29 112 typedef struct VirtIONet
30 113 {
31 114 VirtIODevice vdev;
@@ -63,8 +146,78 @@ typedef struct VirtIONet
63 146 } mac_table;
64 147 uint32_t *vlans;
65 148 DeviceState *qdev;
  149 + QEMUTimer *rein_timer;
  150 + uint32_t rein_timer_ticks;
  151 + uint8_t rein_timer_act;
  152 + uint32_t rein_ring_idx;
  153 + rein_event_t rein_ring[REIN_RING_MAX];
  154 + uint64_t rein_n_dead;
  155 + uint64_t rein_n_inject;
  156 + uint64_t rein_n_rerun;
66 157 } VirtIONet;
67 158
  159 +static void virtio_net_handle_tx_timer(VirtIODevice *, VirtQueue *);
  160 +
  161 +static void virtio_net_rein_event(VirtIONet *n, rein_act_t act, uint64_t other)
  162 +{
  163 + int index = n->rein_ring_idx;
  164 + n->rein_ring_idx = (n->rein_ring_idx + 1) % REIN_RING_MAX;
  165 + rein_event_t *rep = n->rein_ring + index;
  166 + rep->re_time = gethrtime();
  167 + rep->re_act = act;
  168 + rep->re_other = other;
  169 + (void) gettimeofday(&rep->re_tval, NULL);
  170 +}
  171 +
  172 +static void virtio_net_rein_disable(VirtIONet *n)
  173 +{
  174 + qemu_del_timer(n->rein_timer);
  175 + n->rein_timer_act = 0;
  176 +}
  177 +
  178 +static void virtio_net_rein_enable(VirtIONet *n)
  179 +{
  180 + n->rein_timer_ticks = 0;
  181 + qemu_mod_timer(n->rein_timer,
  182 + qemu_get_clock(vm_clock) + REINJECT_TICK_RATE);
  183 + n->rein_timer_act = 1;
  184 +}
  185 +
  186 +static void virtio_net_rein_tick(void *opaque)
  187 +{
  188 + int ret;
  189 + VirtIONet *n = opaque;
  190 + assert(n->rein_timer_act);
  191 +
  192 + n->rein_timer_ticks++;
  193 +
  194 + /* Give up, this may be completely reasonable */
  195 + if (n->rein_timer_ticks > REINJECT_DEADMAN) {
  196 + virtio_net_rein_event(n, REIN_DEADMAN, n->rein_timer_ticks);
  197 + virtio_net_rein_disable(n);
  198 + n->rein_n_dead++;
  199 + return;
  200 + }
  201 +
  202 + ret = virtqueue_stalled(n->tx_vq);
  203 + if (ret == 1) {
  204 + virtio_net_rein_event(n, REIN_INJECT, n->rein_timer_ticks);
  205 + virtio_net_rein_disable(n);
  206 + n->rein_n_inject++;
  207 + return;
  208 + } else if (ret == 2) {
  209 + virtio_net_rein_event(n, REIN_RUN, n->rein_timer_ticks);
  210 + virtio_net_rein_disable(n);
  211 + virtio_net_handle_tx_timer(&n->vdev, n->tx_vq);
  212 + n->rein_n_rerun++;
  213 + return;
  214 + }
  215 +
  216 + assert(ret == 0);
  217 + qemu_mod_timer(n->rein_timer,
  218 + qemu_get_clock(vm_clock) + REINJECT_TICK_RATE);
  219 +}
  220 +
68 221 /* TODO
69 222 * - we could suppress RX interrupt if we were so inclined.
70 223 */
@@ -707,6 +860,7 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
707 860 {
708 861 VirtQueueElement elem;
709 862 int32_t num_packets = 0;
  863 + int32_t inject = 1;
710 864 if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
711 865 return num_packets;
712 866 }
@@ -758,12 +912,16 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
758 912 len += ret;
759 913
760 914 virtqueue_push(vq, &elem, len);
761   - virtio_notify(&n->vdev, vq);
  915 + inject = virtio_notify(&n->vdev, vq);
762 916
763 917 if (++num_packets >= n->tx_burst) {
764 918 break;
765 919 }
766 920 }
  921 +
  922 + if (inject == 0 && virtqueue_handled(vq))
  923 + virtio_net_rein_enable(n);
  924 +
767 925 return num_packets;
768 926 }
769 927
@@ -777,6 +935,16 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
777 935 return;
778 936 }
779 937
  938 + /*
  939 + * Kill the reinjection timer. The reason we are here is that the guest
  940 + * has kicked us to send packets; therefore we don't need to go back and
  941 + * consider injecting the guest with interrupts, because we will do that again
  942 + * naturally. We also don't reset
  943 + */
  944 + if (n->rein_timer_act)
  945 + virtio_net_rein_disable(n);
  946 +
  947 +
780 948 if (n->tx_waiting) {
781 949 virtio_queue_set_notification(vq, 1);
782 950 qemu_del_timer(n->tx_timer);
@@ -1024,6 +1192,12 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
1024 1192 n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_bh);
1025 1193 n->tx_bh = qemu_bh_new(virtio_net_tx_bh, n);
1026 1194 }
  1195 + n->rein_timer = qemu_new_timer(vm_clock, virtio_net_rein_tick, n);
  1196 + n->rein_ring_idx = 0;
  1197 + bzero(n->rein_ring, sizeof (rein_event_t) * REIN_RING_MAX);
  1198 + n->rein_n_dead = 0;
  1199 + n->rein_n_inject = 0;
  1200 + n->rein_n_rerun = 0;
1027 1201 n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
1028 1202 qemu_macaddr_default_if_unset(&conf->macaddr);
1029 1203 memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
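
For context on the race the block comment at the top of the hw/virtio-net.c changes describes: the guest's transmit path kicks the host before it re-enables its interrupt, so a notification that fires inside that window is silently suppressed. The following standalone model is not part of this commit and is not QEMU or guest kernel code; every name in it is invented purely to illustrate the ordering.

/*
 * Standalone model (illustration only) of the lost-interrupt window
 * described in the hw/virtio-net.c comment above.
 */
#include <stdio.h>
#include <stdbool.h>

struct vq_model {
    bool irq_masked;    /* guest-owned, like VRING_AVAIL_F_NO_INTERRUPT */
    bool irq_pending;   /* host-owned, roughly vdev->isr |= 0x01        */
};

/* Host side: what virtio_notify() does, in miniature. */
static void host_notify(struct vq_model *vq)
{
    if (vq->irq_masked)
        return;                 /* suppressed: the lost wakeup */
    vq->irq_pending = true;
}

int main(void)
{
    struct vq_model vq = { .irq_masked = true, .irq_pending = false };

    /* 1. Guest kicks the host while its interrupt is still masked. */
    host_notify(&vq);

    /* 2. Guest unmasks afterwards and then waits for an interrupt. */
    vq.irq_masked = false;

    printf("irq pending after unmask: %s\n",
        vq.irq_pending ? "yes" : "no (guest waits forever)");
    return 0;
}
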
85 hw/virtio.c
@@ -17,20 +17,12 @@
17 17 #include "qemu-error.h"
18 18 #include "virtio.h"
19 19 #include "sysemu.h"
  20 +#include "qemu-barrier.h"
20 21
21 22 /* The alignment to use between consumer and producer parts of vring.
22 23 * x86 pagesize again. */
23 24 #define VIRTIO_PCI_VRING_ALIGN 4096
24 25
25   -/* QEMU doesn't strictly need write barriers since everything runs in
26   - * lock-step. We'll leave the calls to wmb() in though to make it obvious for
27   - * KVM or if kqemu gets SMP support.
28   - * In any case, we must prevent the compiler from reordering the code.
29   - * TODO: we likely need some rmb()/mb() as well.
30   - */
31   -
32   -#define wmb() __asm__ __volatile__("": : :"memory")
33   -
34 26 typedef struct VRingDesc
35 27 {
36 28 uint64_t addr;
@@ -169,6 +161,13 @@ static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
169 161 stw_phys(pa, vring_used_idx(vq) + val);
170 162 }
171 163
  164 +static inline uint16_t vring_used_flags(VirtQueue *vq)
  165 +{
  166 + target_phys_addr_t pa;
  167 + pa = vq->vring.used + offsetof(VRingUsed, flags);
  168 + return lduw_phys(pa);
  169 +}
  170 +
172 171 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
173 172 {
174 173 target_phys_addr_t pa;
@@ -235,7 +234,7 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
235 234 void virtqueue_flush(VirtQueue *vq, unsigned int count)
236 235 {
237 236 /* Make sure buffer is written before we update index. */
238   - wmb();
  237 + smp_wmb();
239 238 trace_virtqueue_flush(vq, count);
240 239 vring_used_idx_increment(vq, count);
241 240 vq->inuse -= count;
@@ -258,6 +257,11 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
258 257 idx, vring_avail_idx(vq));
259 258 exit(1);
260 259 }
  260 + /* On success, callers read a descriptor at vq->last_avail_idx.
  261 + * Make sure descriptor read does not bypass avail index read. */
  262 + if (num_heads) {
  263 + smp_rmb();
  264 + }
261 265
262 266 return num_heads;
263 267 }
@@ -291,7 +295,7 @@ static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
291 295 /* Check they're not leading us off end of descriptors. */
292 296 next = vring_desc_next(desc_pa, i);
293 297 /* Make sure compiler knows to grab that: we don't want it changing! */
294   - wmb();
  298 + smp_wmb();
295 299
296 300 if (next >= max) {
297 301 error_report("Desc next is %u", next);
@@ -629,17 +633,20 @@ void virtio_irq(VirtQueue *vq)
629 633 virtio_notify_vector(vq->vdev, vq->vector);
630 634 }
631 635
632   -void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
  636 +int virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
633 637 {
  638 + /* We need to expose used array entries before checking used event. */
  639 + smp_mb();
634 640 /* Always notify when queue is empty (when feature acknowledge) */
635 641 if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
636 642 (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
637 643 (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
638   - return;
  644 + return 0;
639 645
640 646 trace_virtio_notify(vdev, vq);
641 647 vdev->isr |= 0x01;
642 648 virtio_notify_vector(vdev, vq->vector);
  649 + return 1;
643 650 }
644 651
645 652 void virtio_notify_config(VirtIODevice *vdev)
@@ -880,3 +887,55 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
880 887 {
881 888 return &vq->host_notifier;
882 889 }
  890 +
  891 +int virtqueue_handled(VirtQueue *vq)
  892 +{
  893 + smp_mb();
  894 + return (vq->last_avail_idx == vring_used_idx(vq) ||
  895 + vq->last_avail_idx != vring_avail_idx(vq));
  896 +}
  897 +
  898 +/*
  899 + * We need to go through and check if we have hit the 'stalled' condition.
  900 + * Due to the way that the virtio driver is implemented in the Linux kernel, it
  901 + * will potentially kick the host to process data and disable the queue, but not
  902 + * enable interrupts before the host is done processing packets. When this
  903 + * happens, all network traffic from the guest ends up getting corked up because
  904 + * the guest disabled the queue and is waiting for an interrupt from the host to
  905 + * go and enable it again. In fact, when in this state a little bit of libproc
  906 + * magic gets us going again rather reliably.
  907 + *
  908 + * Eventually the guest will go through and unmask interrupts saying that it
  909 + * wants an injection. If we reach a point in time where the last seen available
  910 + * index is equal to the current available ring index and to the used ring
  911 + * index, then we'll go ahead and inject the interrupt.
  912 + */
  913 +int virtqueue_stalled(VirtQueue *vq)
  914 +{
  915 + smp_mb();
  916 +
  917 + if (vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT)
  918 + return (0);
  919 +
  920 + if (vring_used_flags(vq) & VRING_USED_F_NO_NOTIFY)
  921 + return (0);
  922 +
  923 + if (vq->inuse)
  924 + return (0);
  925 +
  926 + /* We could have also lost the interrupt the other way */
  927 + if (vq->last_avail_idx != vring_avail_idx(vq))
  928 + return (2);
  929 +
  930 + if (vq->last_avail_idx != vring_used_idx(vq))
  931 + return (0);
  932 +
  933 + /*
  934 + * Interrupts are enabled and we're at a point in time where we would
  935 + * have stalled. Let's go ahead and inject the interrupt.
  936 + */
  937 + trace_virtio_notify(vq->vdev, vq);
  938 + vq->vdev->isr |= 0x01;
  939 + virtio_notify_vector(vq->vdev, vq->vector);
  940 + return (1);
  941 +}
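
To make virtqueue_stalled()'s index arithmetic concrete, here is a small standalone sketch with the same 0/1/2 return contract. It is an illustration, not QEMU code: the NO_INTERRUPT, NO_NOTIFY, and inuse checks are deliberately collapsed into two boolean parameters, and the index values in main() are made up.

/*
 * Standalone sketch (not QEMU code) of the decision virtqueue_stalled()
 * makes from last_avail_idx, the avail ring index, and the used ring index.
 */
#include <stdio.h>
#include <stdint.h>

static int stalled(uint16_t last_avail, uint16_t avail, uint16_t used,
    int irq_masked, int inflight)
{
    if (irq_masked || inflight)
        return 0;       /* guest doesn't want, or can't yet take, an irq */
    if (last_avail != avail)
        return 2;       /* unprocessed avail entries: rerun the tx path  */
    if (last_avail != used)
        return 0;       /* completions still outstanding: wait           */
    return 1;           /* fully caught up and unmasked: inject          */
}

int main(void)
{
    /* Guest posted 5 buffers, host completed all 5, guest unmasked: inject. */
    printf("caught up, unmasked -> %d\n", stalled(5, 5, 5, 0, 0));
    /* Guest posted a 6th buffer the host never processed: rerun the flush.  */
    printf("new avail entry     -> %d\n", stalled(5, 6, 5, 0, 0));
    return 0;
}
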
4 hw/virtio.h
@@ -153,7 +153,7 @@ void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
153 153 int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
154 154 int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
155 155
156   -void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
  156 +int virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
157 157
158 158 void virtio_save(VirtIODevice *vdev, QEMUFile *f);
159 159
@@ -226,4 +226,6 @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
226 226 EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
227 227 void virtio_queue_notify_vq(VirtQueue *vq);
228 228 void virtio_irq(VirtQueue *vq);
  229 +int virtqueue_stalled(VirtQueue *vq);
  230 +int virtqueue_handled(VirtQueue *vq);
229 231 #endif
61 qemu-barrier.h
@@ -1,10 +1,65 @@
1 1 #ifndef __QEMU_BARRIER_H
2 2 #define __QEMU_BARRIER_H 1
3 3
4   -/* FIXME: arch dependant, x86 version */
5   -#define smp_wmb() asm volatile("" ::: "memory")
6   -
7 4 /* Compiler barrier */
8 5 #define barrier() asm volatile("" ::: "memory")
9 6
  7 +#if defined(__i386__)
  8 +
  9 +/*
  10 + * Because of the strongly ordered x86 storage model, wmb() and rmb() are nops
  11 + * on x86 (well, a compiler barrier only), at least as long as
  12 + * qemu doesn't do accesses to write-combining memory or non-temporal
  13 + * load/stores from C code.
  14 + */
  15 +#define smp_wmb() barrier()
  16 +#define smp_rmb() barrier()
  17 +/*
  18 + * We use the GCC builtin if it's available, as that can use
  19 + * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
  20 + * However, on i386, there seem to be known bugs as recently as 4.3.
  21 + */
  22 +#if defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
  23 +#define smp_mb() __sync_synchronize()
  24 +#else
  25 +#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
  26 +#endif
  27 +
  28 +#elif defined(__x86_64__)
  29 +
  30 +#define smp_wmb() barrier()
  31 +#define smp_rmb() barrier()
  32 +#define smp_mb() asm volatile("mfence" ::: "memory")
  33 +
  34 +#elif defined(_ARCH_PPC)
  35 +
  36 +/*
  37 + * We use an eieio() for smp_wmb() on powerpc. This assumes we don't
  38 + * need to order cacheable and non-cacheable stores with respect to
  39 + * each other.
  40 + */
  41 +#define smp_wmb() asm volatile("eieio" ::: "memory")
  42 +
  43 +#if defined(__powerpc64__)
  44 +#define smp_rmb() asm volatile("lwsync" ::: "memory")
  45 +#else
  46 +#define smp_rmb() asm volatile("sync" ::: "memory")
  47 +#endif
  48 +
  49 +#define smp_mb() asm volatile("sync" ::: "memory")
  50 +
  51 +#else
  52 +
  53 +/*
  54 + * For (host) platforms we don't have explicit barrier definitions
  55 + * for, we use the gcc __sync_synchronize() primitive to generate a
  56 + * full barrier. This should be safe on all platforms, though it may
  57 + * be overkill for wmb() and rmb().
  58 + */
  59 +#define smp_wmb() __sync_synchronize()
  60 +#define smp_mb() __sync_synchronize()
  61 +#define smp_rmb() __sync_synchronize()
  62 +
  63 +#endif
  64 +
10 65 #endif
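
The reason virtqueue_flush() keeps a write barrier and virtqueue_num_heads() gains a read barrier is the usual producer/consumer pairing on a ring: publish the entry before the index, and read the index before the entry. Below is a minimal sketch under that assumption; the ring type and helper names are invented for illustration, and the macros are re-declared locally in their x86 (compiler-barrier-only) form.

/*
 * Minimal producer/consumer sketch (illustration only) of how the
 * smp_wmb()/smp_rmb() pair protects a ring index, mirroring
 * virtqueue_flush() and virtqueue_num_heads() above.
 */
#include <stdint.h>

#define barrier()  asm volatile("" ::: "memory")
#define smp_wmb()  barrier()    /* x86: stores are not reordered with stores */
#define smp_rmb()  barrier()    /* x86: loads are not reordered with loads   */

struct ring {
    uint32_t entries[16];
    volatile uint16_t idx;      /* number of published entries */
};

/* Producer (e.g. the host filling the used ring). */
static void ring_publish(struct ring *r, uint32_t val)
{
    r->entries[r->idx % 16] = val;   /* 1. write the entry                 */
    smp_wmb();                       /* 2. entry must be visible first ... */
    r->idx = (uint16_t)(r->idx + 1); /* 3. ... then publish the new index  */
}

/* Consumer (e.g. the guest, or the host walking the avail ring). */
static int ring_consume(struct ring *r, uint16_t *last, uint32_t *out)
{
    if (*last == r->idx)             /* 1. read the index                  */
        return 0;
    smp_rmb();                       /* 2. index read before entry read    */
    *out = r->entries[*last % 16];   /* 3. now the entry is safe to read   */
    (*last)++;
    return 1;
}
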
117 qemu_mdb.c
@@ -114,6 +114,69 @@ typedef struct VRing
114 114 target_phys_addr_t used;
115 115 } VRing;
116 116
  117 +/* Sigh, more definitions ... */
  118 +typedef enum rein_act {
  119 + REIN_INJECT,
  120 + REIN_DEADMAN,
  121 + REIN_RUN
  122 +} rein_act_t;
  123 +
  124 +#define REIN_RING_MAX 64
  125 +
  126 +typedef struct rein_event {
  127 + rein_act_t re_act;
  128 + hrtime_t re_time;
  129 + uint64_t re_other;
  130 + struct timeval re_tval;
  131 +} rein_event_t;
  132 +
  133 +typedef struct VirtIONet
  134 +{
  135 + VirtIODevice vdev;
  136 + uint8_t mac[ETH_ALEN];
  137 + uint16_t status;
  138 + VirtQueue *rx_vq;
  139 + VirtQueue *tx_vq;
  140 + VirtQueue *ctrl_vq;
  141 + NICState *nic;
  142 + QEMUTimer *tx_timer;
  143 + QEMUBH *tx_bh;
  144 + uint32_t tx_timeout;
  145 + int32_t tx_burst;
  146 + int tx_waiting;
  147 + uint32_t has_vnet_hdr;
  148 + uint8_t has_ufo;
  149 + struct {
  150 + VirtQueueElement elem;
  151 + ssize_t len;
  152 + } async_tx;
  153 + int mergeable_rx_bufs;
  154 + uint8_t promisc;
  155 + uint8_t allmulti;
  156 + uint8_t alluni;
  157 + uint8_t nomulti;
  158 + uint8_t nouni;
  159 + uint8_t nobcast;
  160 + uint8_t vhost_started;
  161 + struct {
  162 + int in_use;
  163 + int first_multi;
  164 + uint8_t multi_overflow;
  165 + uint8_t uni_overflow;
  166 + uint8_t *macs;
  167 + } mac_table;
  168 + uint32_t *vlans;
  169 + DeviceState *qdev;
  170 + QEMUTimer *rein_timer;
  171 + uint32_t rein_timer_ticks;
  172 + uint8_t rein_timer_act;
  173 + uint32_t rein_ring_idx;
  174 + rein_event_t rein_ring[REIN_RING_MAX];
  175 + uint64_t rein_n_dead;
  176 + uint64_t rein_n_inject;
  177 + uint64_t rein_n_rerun;
  178 +} VirtIONet;
  179 +
117 180 /*
118 181 * NDEVICES comes from the PCIDevice structure and should be changed if this
119 182 * does ever change.
@@ -624,6 +687,58 @@ qemu_mdb_vravail(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
624 687 return (DCMD_OK);
625 688 }
626 689
  690 +static const char *reintostr[] = {
  691 + "INJECT",
  692 + "DEADMAN",
  693 + "RUN"
  694 +};
  695 +
  696 +static int
  697 +qemu_mdb_nic_reinject(uintptr_t addr, uint_t flags, int argc,
  698 + const mdb_arg_t *argv)
  699 +{
  700 + VirtIONet *n;
  701 + uint32_t ii, end;
  702 + rein_event_t *rep;
  703 +
  704 + if (!(flags & DCMD_ADDRSPEC))
  705 + return (DCMD_USAGE);
  706 +
  707 + if (argc > 1)
  708 + return (DCMD_USAGE);
  709 +
  710 + n = mdb_alloc(sizeof (VirtIONet), UM_SLEEP | UM_GC);
  711 +
  712 + if (mdb_vread(n, sizeof (VirtIONet), addr) != sizeof (VirtIONet)) {
  713 + mdb_warn("failed to read VirtIONet");
  714 + return (DCMD_ERR);
  715 + }
  716 +
  717 + if (n->rein_ring_idx == 0)
  718 + end = REIN_RING_MAX;
  719 + else
  720 + end = n->rein_ring_idx - 1;
  721 +
  722 + mdb_printf("%-?s %-10s %s\n", "TIMESTAMP", "ACTION", "OTHER");
  723 + ii = n->rein_ring_idx;
  724 + for (;;) {
  725 + rep = n->rein_ring + ii;
  726 + if (rep->re_time == 0 && rep->re_other == 0)
  727 + break;
  728 +
  729 + mdb_printf("%-?p %-10s ", rep->re_time, reintostr[rep->re_act]);
  730 + if (rep->re_other == 0)
  731 + mdb_printf("%s\n", " - ");
  732 + else
  733 + mdb_printf("%d\n", rep->re_other);
  734 + if (ii + 1 == end)
  735 + break;
  736 + ii = (ii + 1) % REIN_RING_MAX;
  737 + }
  738 +
  739 + return (DCMD_OK);
  740 +}
  741 +
627 742 static const mdb_dcmd_t qemu_dcmds[] = {
628 743 { "pcidev2virtio", NULL, "translate a virtio PCI device to its "
629 744 "virtio equivalent", qemu_mdb_pcidev2virtio },
@@ -633,6 +748,8 @@ static const mdb_dcmd_t qemu_dcmds[] = {
633 748 qemu_mdb_vrused },
634 749 { "qemu_vravail", NULL, "Spit out the avail event of the vring",
635 750 qemu_mdb_vravail },
  751 + { "qemu_nic_reinject", NULL, "Print all of the reinject events",
  752 + qemu_mdb_nic_reinject },
636 753 { NULL }
637 754 };
638 755
