Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

I ♥♥♥ luuv ♥♥♥ cstyle

Signed-off-by: Alexey Zaytsev <alexey.zaytsev@gmail.com>
  • Loading branch information...
commit b64110f3ec1bcde4a28fc092dd305776e4767b38 1 parent 44d24f9
@xl0 authored
View
16 virtio/util.c
@@ -6,8 +6,8 @@
#include <sys/sunndi.h>
#include "util.h"
-/*
- * Add to ddi?
+/*
+ * Add to ddi?
*/
void
dev_err(dev_info_t *dip, int ce, char *fmt, ...)
@@ -25,7 +25,8 @@ dev_err(dev_info_t *dip, int ce, char *fmt, ...)
ddi_get_instance(dip), buf);
}
-void dev_panic(dev_info_t *dip, char *fmt, ...)
+void
+dev_panic(dev_info_t *dip, char *fmt, ...)
{
va_list ap;
char buf[256];
@@ -40,7 +41,8 @@ void dev_panic(dev_info_t *dip, char *fmt, ...)
ddi_get_instance(dip), buf);
}
-void hex_dump(char *prefix, void *addr, int len)
+void
+hex_dump(char *prefix, void *addr, int len)
{
unsigned char *base = addr;
char buff[256], *bptr;
@@ -55,11 +57,13 @@ void hex_dump(char *prefix, void *addr, int len)
i++;
if (!(i % 16)) {
- cmn_err(CE_NOTE, "%s: 0x%p: %s", prefix, base + i - 16, buff);
+ cmn_err(CE_NOTE, "%s: 0x%p: %s",
+ prefix, base + i - 16, buff);
bptr = buff;
}
}
if (i % 16)
- cmn_err(CE_NOTE, "%s: 0x%p: %s", prefix, base + i - (i % 16), buff);
+ cmn_err(CE_NOTE, "%s: 0x%p: %s",
+ prefix, base + i - (i % 16), buff);
}
View
20 virtio/util.h
@@ -7,20 +7,12 @@ void dev_panic(dev_info_t *dip, char *fmt, ...);
void hex_dump(char *prefix, void *addr, int len);
-static inline int up_to_power2(int i) {
-
- if (ISP2(i))
- return i;
-
- /* Highbit returns h+1. */
- return (1 << (highbit(i) - 1));
-}
-
-/*
+/*
* Stolen from the Linux kernel! Will find a BSD one, but pls don't
* sue us yet. ;)
*/
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
-
+#define container_of(ptr, type, member) ( \
+{ \
+ const typeof(((type *)0)->member) *__mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); \
+})
View
235 virtio/virtio.c
@@ -1,5 +1,30 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
-/* Heavily based on the NetBSD virtio driver by Minoura Makoto. */
+/* Based on the NetBSD virtio driver by Minoura Makoto. */
/*
* Copyright (c) 2010 Minoura Makoto.
* All rights reserved.
@@ -47,14 +72,8 @@
#include "util.h"
#include "virtiovar.h"
#include "virtioreg.h"
-#define NDEVNAMES (sizeof(virtio_device_name)/sizeof(char*))
-#define MINSEG_INDIRECT 2 /* use indirect if nsegs >= this value */
-#define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1))& \
- ~(VIRTIO_PAGE_SIZE-1))
-
-/*
- * Declarations
- */
+#define NDEVNAMES (sizeof (virtio_device_name) / sizeof (char *))
+#define MINSEG_INDIRECT 2 /* use indirect if nsegs >= this value */
void
virtio_init(struct virtio_softc *sc)
@@ -69,7 +88,8 @@ virtio_set_status(struct virtio_softc *sc, int status)
if (status != 0)
old = ddi_get8(sc->sc_ioh,
- (uint8_t *) (sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS));
+ (uint8_t *) (sc->sc_io_addr +
+ VIRTIO_CONFIG_DEVICE_STATUS));
ddi_put8(sc->sc_ioh,
(uint8_t *) (sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS),
@@ -115,7 +135,7 @@ virtio_show_features(struct virtio_softc *sc, uint32_t features,
buf += snprintf(buf, bufend - buf, ") ");
- return buf - orig_buf;
+ return (buf - orig_buf);
}
boolean_t
@@ -155,7 +175,7 @@ virtio_read_device_config_8(struct virtio_softc *sc, int index)
r = ddi_get32(sc->sc_ioh,
(uint32_t *) (sc->sc_io_addr + sc->sc_config_offset +
- index + sizeof(uint32_t)));
+ index + sizeof (uint32_t)));
r <<= 32;
r += ddi_get32(sc->sc_ioh,
(uint32_t *) (sc->sc_io_addr + sc->sc_config_offset + index));
@@ -176,8 +196,8 @@ virtio_write_device_config_2(struct virtio_softc *sc,
int index, uint16_t value)
{
ddi_put16(sc->sc_ioh,
- (uint16_t *) (sc->sc_io_addr + sc->sc_config_offset + index),
- value);
+ (uint16_t *) (sc->sc_io_addr + sc->sc_config_offset + index),
+ value);
}
void
@@ -185,21 +205,20 @@ virtio_write_device_config_4(struct virtio_softc *sc,
int index, uint32_t value)
{
ddi_put32(sc->sc_ioh,
- (uint32_t *) (sc->sc_io_addr + sc->sc_config_offset + index),
- value);
+ (uint32_t *) (sc->sc_io_addr + sc->sc_config_offset + index),
+ value);
}
void
virtio_write_device_config_8(struct virtio_softc *sc,
- int index, uint64_t value)
+ int index, uint64_t value)
{
ddi_put32(sc->sc_ioh,
- (uint32_t *) (sc->sc_io_addr + sc->sc_config_offset + index),
- value & 0xFFFFFFFF);
+ (uint32_t *) (sc->sc_io_addr + sc->sc_config_offset + index),
+ value & 0xFFFFFFFF);
ddi_put32(sc->sc_ioh,
- (uint32_t *) (sc->sc_io_addr + sc->sc_config_offset+
- index + sizeof(uint32_t)),
- value >> 32);
+ (uint32_t *) (sc->sc_io_addr + sc->sc_config_offset +
+ index + sizeof (uint32_t)), value >> 32);
}
/*
@@ -248,7 +267,7 @@ virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
int vq_size = vq->vq_num;
/* free slot management */
- list_create(&vq->vq_freelist, sizeof(struct vq_entry),
+ list_create(&vq->vq_freelist, sizeof (struct vq_entry),
offsetof(struct vq_entry, qe_list));
for (i = 0; i < vq_size; i++) {
@@ -288,21 +307,22 @@ virtio_alloc_vq(struct virtio_softc *sc,
size_t len;
struct virtqueue *vq;
-#define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1))& \
- ~(VIRTIO_PAGE_SIZE-1))
+#define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
+ ~(VIRTIO_PAGE_SIZE-1))
ddi_put16(sc->sc_ioh,
- (uint16_t *) (sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
+ (uint16_t *) (sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
+ index);
vq_size = ddi_get16(sc->sc_ioh,
(uint16_t *) (sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
if (vq_size == 0) {
dev_err(sc->sc_dev, CE_WARN,
- "virtqueue dest not exist, index %d for %s\n",
- index, name);
+ "virtqueue dest not exist, index %d for %s\n",
+ index, name);
goto out;
}
- vq = kmem_zalloc(sizeof(struct virtqueue), KM_SLEEP);
+ vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
if (!vq)
goto out;
@@ -311,15 +331,15 @@ virtio_alloc_vq(struct virtio_softc *sc,
vq_size = MIN(vq_size, size);
/* allocsize1: descriptor table + avail ring + pad */
- allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc) * vq_size
- + sizeof(struct vring_avail) +
- + sizeof(uint16_t) * vq_size);
+ allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size
+ + sizeof (struct vring_avail) +
+ + sizeof (uint16_t) * vq_size);
/* allocsize2: used ring + pad */
- allocsize2 = VIRTQUEUE_ALIGN(sizeof(struct vring_used)
- + sizeof(struct vring_used_elem) * vq_size);
- /* allocsize3: indirect table */
+ allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used)
+ + sizeof (struct vring_used_elem) * vq_size);
+ /* allocsize3: indirect table */
if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
- allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
+ allocsize3 = sizeof (struct vring_desc) * maxnsegs * vq_size;
else
sc->sc_indirect = allocsize3 = 0;
@@ -345,7 +365,8 @@ virtio_alloc_vq(struct virtio_softc *sc,
memset(vq->vq_vaddr, 0, allocsize);
r = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
- (caddr_t) vq->vq_vaddr, allocsize, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
+ (caddr_t) vq->vq_vaddr, allocsize,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
if (r != DDI_DMA_MAPPED) {
dev_err(sc->sc_dev, CE_WARN,
@@ -360,30 +381,30 @@ virtio_alloc_vq(struct virtio_softc *sc,
/* set the vq address */
ddi_put32(sc->sc_ioh,
(uint32_t *) (sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
- (vq->vq_dma_cookie.dmac_address / VIRTIO_PAGE_SIZE));
+ (vq->vq_dma_cookie.dmac_address / VIRTIO_PAGE_SIZE));
/* remember addresses and offsets for later use */
vq->vq_owner = sc;
vq->vq_num = vq_size;
vq->vq_index = index;
vq->vq_descs = vq->vq_vaddr;
- vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
- vq->vq_avail = (void*)(((char*)vq->vq_descs) + vq->vq_availoffset);
+ vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
+ vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
vq->vq_usedoffset = allocsize1;
- vq->vq_used = (void*)(((char*)vq->vq_descs) + vq->vq_usedoffset);
+ vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
vq->vq_maxnsegs = maxnsegs;
if (sc->sc_indirect) {
vq->vq_indirectoffset = allocsize1 + allocsize2;
- vq->vq_indirect = (void*)(((char*)vq->vq_descs) +
- vq->vq_indirectoffset);
+ vq->vq_indirect = (void *)(((char *)vq->vq_descs) +
+ vq->vq_indirectoffset);
} else {
vq->vq_indirect = NULL;
}
/* free slot management */
- vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
- KM_NOSLEEP);
+ vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
+ KM_NOSLEEP);
if (!vq->vq_entries) {
dev_err(sc->sc_dev, CE_NOTE,
"Failed to allocate slow array for vq %d", index);
@@ -393,12 +414,12 @@ virtio_alloc_vq(struct virtio_softc *sc,
virtio_init_vq(sc, vq);
dev_err(sc->sc_dev, CE_NOTE,
- "allocated %u bytes for virtqueue %d (%s), "
- "size %d", allocsize, index, name, vq_size);
+ "allocated %u bytes for virtqueue %d (%s), "
+ "size %d", allocsize, index, name, vq_size);
if (sc->sc_indirect) {
dev_err(sc->sc_dev, CE_NOTE,
"using %d bytes (%d entries) of indirect descriptors",
- allocsize3, maxnsegs * vq_size);
+ allocsize3, maxnsegs * vq_size);
}
return (vq);
@@ -410,7 +431,7 @@ virtio_alloc_vq(struct virtio_softc *sc,
out_alloc:
ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
- kmem_free(vq, sizeof(struct virtqueue));
+ kmem_free(vq, sizeof (struct virtqueue));
out:
return (NULL);
}
@@ -428,7 +449,7 @@ virtio_free_vq(struct virtqueue *vq)
ddi_put32(sc->sc_ioh,
(uint32_t *) (sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
- kmem_free(vq->vq_entries, sizeof(struct vq_entry) * vq->vq_num);
+ kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
ddi_dma_unbind_handle(vq->vq_dma_handle);
ddi_dma_mem_free(&vq->vq_dma_acch);
@@ -436,7 +457,7 @@ virtio_free_vq(struct virtqueue *vq)
mutex_destroy(&vq->vq_freelist_lock);
- kmem_free(vq, sizeof(struct virtqueue));
+ kmem_free(vq, sizeof (struct virtqueue));
}
/*
@@ -458,7 +479,7 @@ vq_alloc_entry(struct virtqueue *vq)
qe->qe_next = NULL;
qe->ind_next = NULL;
- memset(qe->qe_desc, 0, sizeof(struct vring_desc));
+ memset(qe->qe_desc, 0, sizeof (struct vring_desc));
return (qe);
}
@@ -477,7 +498,7 @@ virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len, bool write)
qe->qe_desc->addr = paddr;
qe->qe_desc->len = len;
- /* 'write' - from the driver's point of view*/
+ /* 'write' - from the driver's point of view */
if (!write) {
qe->qe_desc->flags |= VRING_DESC_F_WRITE;
}
@@ -494,9 +515,9 @@ virtio_ve_set_indirect(struct vq_entry *qe, int nsegs, bool write)
qe->qe_desc->addr = vq->vq_dma_cookie.dmac_address +
vq->vq_indirectoffset;
- qe->qe_desc->addr += sizeof(struct vring_desc) *
+ qe->qe_desc->addr += sizeof (struct vring_desc) *
vq->vq_maxnsegs * qe->qe_index;
- qe->qe_desc->len = sizeof(struct vring_desc) * nsegs;
+ qe->qe_desc->len = sizeof (struct vring_desc) * nsegs;
qe->qe_desc->flags = write ? 0 : VRING_DESC_F_WRITE;
qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
qe->ind_next = vq->vq_indirect;
@@ -505,7 +526,7 @@ virtio_ve_set_indirect(struct vq_entry *qe, int nsegs, bool write)
void
virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
- ddi_dma_cookie_t dma_cookie, unsigned int ncookies, bool write)
+ ddi_dma_cookie_t dma_cookie, unsigned int ncookies, bool write)
{
uint16_t flags = write ? 0 : VRING_DESC_F_WRITE;
int i;
@@ -524,7 +545,7 @@ virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
void
virtio_ve_add_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
- bool write)
+ bool write)
{
uint16_t flags = write ? 0 : VRING_DESC_F_WRITE;
@@ -544,7 +565,7 @@ virtio_notify(struct virtqueue *vq)
/* Find out if we need to notify the device. */
ddi_dma_sync(vq->vq_dma_handle, vq->vq_usedoffset,
- sizeof(struct vring_used), DDI_DMA_SYNC_FORKERNEL);
+ sizeof (struct vring_used), DDI_DMA_SYNC_FORKERNEL);
if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
ddi_put16(vsc->sc_ioh,
@@ -565,22 +586,24 @@ virtio_sync_vq(struct virtqueue *vq)
/* Sync the part of the ring that has been filled. */
/* XXX worth the trouble? Maybe just sync the whole mapping? */
(void) ddi_dma_sync(vq->vq_dma_handle,
- vq->vq_availoffset + sizeof(struct vring_avail) +
- ((sizeof(uint16_t) * vq->vq_avail->idx )),
- sizeof(uint16_t) * (vq->vq_avail_idx - vq->vq_avail->idx),
+ vq->vq_availoffset + sizeof (struct vring_avail) +
+ ((sizeof (uint16_t) * vq->vq_avail->idx)),
+ sizeof (uint16_t) * (vq->vq_avail_idx - vq->vq_avail->idx),
DDI_DMA_SYNC_FORDEV);
- /* Yes, we need to make sure the device sees the idx update after
- * it sees the ring update. */
+ /*
+ * Yes, we need to make sure the device sees the idx update after
+ * it sees the ring update.
+ */
vq->vq_avail->idx = vq->vq_avail_idx;
/* Sync the idx and flags */
(void) ddi_dma_sync(vq->vq_dma_handle, vq->vq_availoffset,
- sizeof(struct vring_avail), DDI_DMA_SYNC_FORDEV);
+ sizeof (struct vring_avail), DDI_DMA_SYNC_FORDEV);
if (vq->vq_indirect)
(void) ddi_dma_sync(vq->vq_dma_handle, vq->vq_indirectoffset,
- sizeof(struct vring_desc) * vq->vq_maxnsegs * vq->vq_num,
+ sizeof (struct vring_desc) * vq->vq_maxnsegs * vq->vq_num,
DDI_DMA_SYNC_FORDEV);
virtio_notify(vq);
@@ -595,8 +618,10 @@ virtio_push_chain(struct vq_entry *qe, boolean_t sync)
ASSERT(qe);
- /* Bind the descs together, paddr and len should be already
- * set with virtio_ve_set */
+ /*
+ * Bind the descs together, paddr and len should be already
+ * set with virtio_ve_set
+ */
do {
if (qe->qe_next) {
qe->qe_desc->flags |= VRING_DESC_F_NEXT;
@@ -630,13 +655,15 @@ virtio_pull_chain(struct virtqueue *vq, size_t *len)
int slot;
int usedidx;
- /* Sync idx (and flags), but only if we don't have any backlog
- * from the previous sync. */
+ /*
+ * Sync idx (and flags), but only if we don't have any backlog
+ * from the previous sync.
+ */
if (vq->vq_used_idx == vq->vq_used->idx) {
ddi_dma_sync(vq->vq_dma_handle, vq->vq_usedoffset,
- sizeof(struct vring_used), DDI_DMA_SYNC_FORKERNEL);
+ sizeof (struct vring_used), DDI_DMA_SYNC_FORKERNEL);
- /* Still nothing? Bye.*/
+ /* Still nothing? Bye. */
if (vq->vq_used_idx == vq->vq_used->idx)
return (NULL);
}
@@ -648,25 +675,25 @@ virtio_pull_chain(struct virtqueue *vq, size_t *len)
/* Sync the ring entry */
ddi_dma_sync(vq->vq_dma_handle,
- vq->vq_usedoffset + sizeof(struct vring_used) +
- sizeof(struct vring_used_elem) * usedidx,
- sizeof(struct vring_used_elem), DDI_DMA_SYNC_FORKERNEL);
+ vq->vq_usedoffset + sizeof (struct vring_used) +
+ sizeof (struct vring_used_elem) * usedidx,
+ sizeof (struct vring_used_elem), DDI_DMA_SYNC_FORKERNEL);
slot = vq->vq_used->ring[usedidx].id;
*len = vq->vq_used->ring[usedidx].len;
/* And the descriptor */
ddi_dma_sync(vq->vq_dma_handle,
- sizeof(struct vring_desc) * slot,
- sizeof(struct vring_desc), DDI_DMA_SYNC_FORKERNEL);
+ sizeof (struct vring_desc) * slot,
+ sizeof (struct vring_desc), DDI_DMA_SYNC_FORKERNEL);
head = tmp = &vq->vq_entries[slot];
- /* Sync the rest of the chain*/
+ /* Sync the rest of the chain */
while (tmp->qe_next) {
tmp = tmp->qe_next;
ddi_dma_sync(vq->vq_dma_handle,
- sizeof(struct vring_desc) * tmp->qe_index,
- sizeof(struct vring_desc), DDI_DMA_SYNC_FORKERNEL);
+ sizeof (struct vring_desc) * tmp->qe_index,
+ sizeof (struct vring_desc), DDI_DMA_SYNC_FORKERNEL);
}
return (head);
@@ -727,12 +754,14 @@ static int virtio_register_msi(struct virtio_softc *sc,
goto out_nomsi;
}
- /* Those who try to register more handlers then the device
- * supports shall suffer. */
+ /*
+ * Those who try to register more handlers than the device
+ * supports shall suffer.
+ */
ASSERT(handler_count <= count);
sc->sc_intr_htable = kmem_zalloc(
- sizeof(ddi_intr_handle_t) * handler_count,
+ sizeof (ddi_intr_handle_t) * handler_count,
KM_SLEEP);
if (!sc->sc_intr_htable) {
dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI handles");
@@ -740,7 +769,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
}
ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
- handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
+ handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
if (ret) {
dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d",
ret);
@@ -801,8 +830,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
if (ret)
sc->sc_intr_cap = 0;
- /* Enable the iterrupts. Either the whole block, or
- * one by one. */
+ /* Enable the interrupts. Either the whole block, or one by one. */
if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
ret = ddi_intr_block_enable(sc->sc_intr_htable,
sc->sc_intr_num);
@@ -847,9 +875,8 @@ static int virtio_register_msi(struct virtio_softc *sc,
(uint16_t *) (sc->sc_io_addr +
VIRTIO_CONFIG_QUEUE_VECTOR));
if (check != i) {
- dev_err(sc->sc_dev, CE_WARN, "Failed to bind haneler for"
- "VQ %d, MSI %d. Check = %x",
- i, i, check);
+ dev_err(sc->sc_dev, CE_WARN, "Failed to bind haneler"
+ "for VQ %d, MSI %d. Check = %x", i, i, check);
ret = ENODEV;
goto out_bind;
}
@@ -865,8 +892,8 @@ static int virtio_register_msi(struct virtio_softc *sc,
(uint16_t *) (sc->sc_io_addr +
VIRTIO_CONFIG_CONFIG_VECTOR));
if (check != i) {
- dev_err(sc->sc_dev, CE_WARN, "Failed to bind haneler for"
- "Config updates, MSI %d", i);
+ dev_err(sc->sc_dev, CE_WARN, "Failed to bind haneler "
+ "for Config updates, MSI %d", i);
ret = ENODEV;
goto out_bind;
}
@@ -903,7 +930,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
for (i = 0; i < actual; i++)
ddi_intr_free(sc->sc_intr_htable[i]);
out_msi_alloc:
- kmem_free(sc->sc_intr_htable, sizeof(ddi_intr_handle_t) * count);
+ kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) * count);
out_nomsi:
return (ret);
@@ -915,7 +942,8 @@ struct virtio_handler_container {
struct virtio_int_handler vq_handlers[];
};
-uint_t virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
+uint_t
+virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
{
struct virtio_softc *sc = (void *) arg1;
struct virtio_handler_container *vhc = (void *) arg2;
@@ -941,7 +969,7 @@ uint_t virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
vhc->vq_handlers[i].vh_priv);
}
- return DDI_INTR_CLAIMED;
+ return (DDI_INTR_CLAIMED);
}
static int
@@ -965,7 +993,7 @@ virtio_register_intx(struct virtio_softc *sc,
if (config_handler)
config_handler_count = 1;
- vhc = kmem_alloc(sizeof(struct virtio_int_handler) *
+ vhc = kmem_alloc(sizeof (struct virtio_int_handler) *
(vq_handler_count + config_handler_count),
KM_SLEEP);
if (!vhc) {
@@ -980,10 +1008,10 @@ virtio_register_intx(struct virtio_softc *sc,
if (config_handler) {
memcpy(&vhc->config_handler, config_handler,
- sizeof(struct virtio_int_handler));
+ sizeof (struct virtio_int_handler));
}
- sc->sc_intr_htable = kmem_zalloc(sizeof(ddi_intr_handle_t),
+ sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t),
KM_SLEEP);
if (!sc->sc_intr_htable) {
dev_err(sc->sc_dev, CE_WARN,
@@ -1001,7 +1029,7 @@ virtio_register_intx(struct virtio_softc *sc,
}
/* Can't happen, we requested 1, and ddi_intr_alloc did not fail. */
- ASSERT (actual == 1);
+ ASSERT(actual == 1);
sc->sc_intr_num = 1;
ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
@@ -1036,9 +1064,9 @@ virtio_register_intx(struct virtio_softc *sc,
out_prio:
ddi_intr_free(sc->sc_intr_htable[0]);
out_int_alloc:
- kmem_free(sc->sc_intr_htable, sizeof(ddi_intr_handle_t));
+ kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
out_handle:
- kmem_free(vhc, sizeof(struct virtio_int_handler) *
+ kmem_free(vhc, sizeof (struct virtio_int_handler) *
(vq_handler_count + config_handler_count));
out:
return (ret);
@@ -1060,7 +1088,7 @@ virtio_register_ints(struct virtio_softc *sc,
goto out_inttype;
}
- /* If we have msi, let's use them.*/
+ /* If we have msi, let's use them. */
if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
ret = virtio_register_msi(sc, config_handler,
vq_handlers, intr_types);
@@ -1073,7 +1101,7 @@ virtio_register_ints(struct virtio_softc *sc,
dev_err(sc->sc_dev, CE_WARN,
"Using legacy interrupts");
- return virtio_register_intx(sc, config_handler, vq_handlers);
+ return (virtio_register_intx(sc, config_handler, vq_handlers));
}
dev_err(sc->sc_dev, CE_WARN,
@@ -1110,8 +1138,7 @@ virtio_release_ints(struct virtio_softc *sc)
}
- /* Disable the iterrupts. Either the whole block, or
- * one by one. */
+ /* Disable the interrupts. Either the whole block, or one by one. */
if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
ret = ddi_intr_block_disable(sc->sc_intr_htable,
sc->sc_intr_num);
@@ -1141,7 +1168,7 @@ virtio_release_ints(struct virtio_softc *sc)
ddi_intr_free(sc->sc_intr_htable[i]);
kmem_free(sc->sc_intr_htable,
- sizeof(ddi_intr_handle_t) * sc->sc_intr_num);
+ sizeof (ddi_intr_handle_t) * sc->sc_intr_num);
/* After disabling interrupts, the config offset is non-MSI. */
View
148 virtio/virtioreg.h
@@ -29,7 +29,9 @@
* Part of the file derived from `Virtio PCI Card Specification v0.8.6 DRAFT'
* Appendix A.
*/
-/* An interface for efficient virtio implementation.
+
+/*
+ * An interface for efficient virtio implementation.
*
* This header is BSD licensed so anyone can use the definitions
* to implement compatible drivers/servers.
@@ -48,10 +50,10 @@
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -67,101 +69,109 @@
#include <sys/types.h>
-#define PCI_VENDOR_QUMRANET 0x1af4
-#define PCI_DEV_VIRTIO_MIN 0x1000
-#define PCI_DEV_VIRTIO_MAX 0x103f
-#define VIRTIO_PCI_ABI_VERSION 0
+#define PCI_VENDOR_QUMRANET 0x1af4
+#define PCI_DEV_VIRTIO_MIN 0x1000
+#define PCI_DEV_VIRTIO_MAX 0x103f
+#define VIRTIO_PCI_ABI_VERSION 0
/* Virtio product id (subsystem) */
-#define PCI_PRODUCT_VIRTIO_NETWORK 1
-#define PCI_PRODUCT_VIRTIO_BLOCK 2
-#define PCI_PRODUCT_VIRTIO_CONSOLE 3
-#define PCI_PRODUCT_VIRTIO_ENTROPY 4
-#define PCI_PRODUCT_VIRTIO_BALLOON 5
-#define PCI_PRODUCT_VIRTIO_9P 9
+#define PCI_PRODUCT_VIRTIO_NETWORK 1
+#define PCI_PRODUCT_VIRTIO_BLOCK 2
+#define PCI_PRODUCT_VIRTIO_CONSOLE 3
+#define PCI_PRODUCT_VIRTIO_ENTROPY 4
+#define PCI_PRODUCT_VIRTIO_BALLOON 5
+#define PCI_PRODUCT_VIRTIO_9P 9
/* Virtio header */
-#define VIRTIO_CONFIG_DEVICE_FEATURES 0 /* 32bit */
-#define VIRTIO_CONFIG_GUEST_FEATURES 4 /* 32bit */
+#define VIRTIO_CONFIG_DEVICE_FEATURES 0 /* 32bit */
+#define VIRTIO_CONFIG_GUEST_FEATURES 4 /* 32bit */
-#define VIRTIO_F_NOTIFY_ON_EMPTY (1<<24)
-#define VIRTIO_F_RING_INDIRECT_DESC (1<<28)
-#define VIRTIO_F_BAD_FEATURE (1<<30)
+#define VIRTIO_F_NOTIFY_ON_EMPTY (1<<24)
+#define VIRTIO_F_RING_INDIRECT_DESC (1<<28)
+#define VIRTIO_F_BAD_FEATURE (1<<30)
-#define VIRTIO_CONFIG_QUEUE_ADDRESS 8 /* 32bit */
-#define VIRTIO_CONFIG_QUEUE_SIZE 12 /* 16bit */
-#define VIRTIO_CONFIG_QUEUE_SELECT 14 /* 16bit */
-#define VIRTIO_CONFIG_QUEUE_NOTIFY 16 /* 16bit */
-#define VIRTIO_CONFIG_DEVICE_STATUS 18 /* 8bit */
+#define VIRTIO_CONFIG_QUEUE_ADDRESS 8 /* 32bit */
+#define VIRTIO_CONFIG_QUEUE_SIZE 12 /* 16bit */
+#define VIRTIO_CONFIG_QUEUE_SELECT 14 /* 16bit */
+#define VIRTIO_CONFIG_QUEUE_NOTIFY 16 /* 16bit */
+#define VIRTIO_CONFIG_DEVICE_STATUS 18 /* 8bit */
-#define VIRTIO_CONFIG_DEVICE_STATUS_RESET 0
-#define VIRTIO_CONFIG_DEVICE_STATUS_ACK 1
-#define VIRTIO_CONFIG_DEVICE_STATUS_DRIVER 2
-#define VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK 4
-#define VIRTIO_CONFIG_DEVICE_STATUS_FAILED 128
+#define VIRTIO_CONFIG_DEVICE_STATUS_RESET 0
+#define VIRTIO_CONFIG_DEVICE_STATUS_ACK 1
+#define VIRTIO_CONFIG_DEVICE_STATUS_DRIVER 2
+#define VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK 4
+#define VIRTIO_CONFIG_DEVICE_STATUS_FAILED 128
-#define VIRTIO_CONFIG_ISR_STATUS 19 /* 8bit */
-#define VIRTIO_CONFIG_ISR_CONFIG_CHANGE 2
+#define VIRTIO_CONFIG_ISR_STATUS 19 /* 8bit */
+#define VIRTIO_CONFIG_ISR_CONFIG_CHANGE 2
-#define VIRTIO_CONFIG_CONFIG_VECTOR 20 /* 16bit, optional */
-#define VIRTIO_CONFIG_QUEUE_VECTOR 22
+#define VIRTIO_CONFIG_CONFIG_VECTOR 20 /* 16bit, optional */
+#define VIRTIO_CONFIG_QUEUE_VECTOR 22
-#define VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI 20
-#define VIRTIO_CONFIG_DEVICE_CONFIG_MSI 24
+#define VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI 20
+#define VIRTIO_CONFIG_DEVICE_CONFIG_MSI 24
-#define VIRTIO_MSI_NO_VECTOR 0xffff
+#define VIRTIO_MSI_NO_VECTOR 0xffff
/* Virtqueue */
/* This marks a buffer as continuing via the next field. */
-#define VRING_DESC_F_NEXT 1
-/* This marks a buffer as write-only, from the devices's perspective.
- (otherwise read-only). */
-#define VRING_DESC_F_WRITE 2
+#define VRING_DESC_F_NEXT 1
+/*
+ * This marks a buffer as write-only, from the device's perspective.
+ * (otherwise read-only).
+ */
+#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
-#define VRING_DESC_F_INDIRECT 4
+#define VRING_DESC_F_INDIRECT 4
-/* The Host uses this in used->flags to advise the Guest: don't kick me
+/*
+ * The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
- * optimization. Guest will still kick if it's out of buffers. */
-#define VRING_USED_F_NO_NOTIFY 1
-/* The Guest uses this in avail->flags to advise the Host: don't
+ * optimization. Guest will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/*
+ * The Guest uses this in avail->flags to advise the Host: don't
* interrupt me when you consume a buffer. It's unreliable, so it's
- * simply an optimization. */
-#define VRING_AVAIL_F_NO_INTERRUPT 1
+ * simply an optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
-/* Virtio ring descriptors: 16 bytes.
- * These can chain together via "next". */
+/*
+ * Virtio ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
struct vring_desc {
- /* Address (guest-physical). */
- uint64_t addr;
- /* Length. */
- uint32_t len;
- /* The flags as indicated above. */
- uint16_t flags;
- /* We chain unused descriptors via this, too */
- uint16_t next;
+ /* Address (guest-physical). */
+ uint64_t addr;
+ /* Length. */
+ uint32_t len;
+ /* The flags as indicated above. */
+ uint16_t flags;
+ /* We chain unused descriptors via this, too */
+ uint16_t next;
} __packed;
struct vring_avail {
- uint16_t flags;
- uint16_t idx;
- uint16_t ring[];
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
} __packed;
/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
- /* Index of start of used descriptor chain. */
- uint32_t id;
- /* Total length of the descriptor chain which was written to. */
- uint32_t len;
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
} __packed;
struct vring_used {
- uint16_t flags;
- uint16_t idx;
- struct vring_used_elem ring[];
+ uint16_t flags;
+ uint16_t idx;
+ struct vring_used_elem ring[];
} __packed;
-#define VIRTIO_PAGE_SIZE (4096)
+#define VIRTIO_PAGE_SIZE (4096)
#endif /* _DEV_PCI_VIRTIOREG_H_ */
View
83 virtio/virtiovar.h
@@ -29,7 +29,8 @@
* Part of the file derived from `Virtio PCI Card Specification v0.8.6 DRAFT'
* Appendix A.
*/
-/* An interface for efficient virtio implementation.
+/*
+ * An interface for efficient virtio implementation.
*
* This header is BSD licensed so anyone can use the definitions
* to implement compatible drivers/servers.
@@ -48,10 +49,10 @@
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -70,37 +71,38 @@
#include <sys/cmn_err.h>
#include <sys/list.h>
-#define TRACE { \
- cmn_err (CE_NOTE, "^%s:%d %s()\n", __FILE__, __LINE__, __func__); \
- /*delay(drv_usectohz(1000000)); */\
+#define TRACE { \
+ cmn_err(CE_NOTE, "^%s:%d %s()\n", __FILE__, __LINE__, __func__); \
}
-#define FAST_TRACE { \
- cmn_err (CE_NOTE, "^%s:%d %s()\n", __FILE__, __LINE__, __func__); \
+#define FAST_TRACE { \
+ cmn_err(CE_NOTE, "^%s:%d %s()\n", __FILE__, __LINE__, __func__); \
}
+
typedef boolean_t bool;
-#define __packed __attribute__((packed))
+#define __packed __attribute__((packed))
struct vq_entry {
list_node_t qe_list;
struct virtqueue *qe_queue;
uint16_t qe_index; /* index in vq_desc array */
- uint16_t qe_used_len; /* Set when the descriptor gets back from device*/
+ /* Set when the descriptor gets back from device */
+ uint16_t qe_used_len;
/* followings are used only when it is the `head' entry */
struct vq_entry *qe_next;
struct vring_desc *qe_desc;
- struct vring_desc *ind_next;
+ struct vring_desc *ind_next;
};
struct virtqueue {
struct virtio_softc *vq_owner;
- unsigned int vq_num; /* queue size (# of entries) */
+ unsigned int vq_num; /* queue size (# of entries) */
int vq_index; /* queue number (0, 1, ...) */
/* vring pointers (KVA) */
- struct vring_desc *vq_descs;
- struct vring_avail *vq_avail;
- struct vring_used *vq_used;
+ struct vring_desc *vq_descs;
+ struct vring_avail *vq_avail;
+ struct vring_used *vq_used;
void *vq_indirect;
/* virtqueue allocation info */
@@ -138,52 +140,27 @@ struct virtio_softc {
uint32_t sc_features;
int sc_indirect;
- int sc_nvqs; /* set by the user */
+ int sc_nvqs; /* set by the user */
ddi_intr_handle_t *sc_intr_htable;
int sc_intr_num;
int sc_intr_cap;
};
-//typedef int (*virtio_int_func) (struct virtio_softc *sc, void *priv);
-
struct virtio_int_handler {
ddi_intr_handler_t *vh_func;
void *vh_priv;
};
-/* The standard layout for the ring is a continuous chunk of memory which
- * looks like this. We assume num is a power of 2.
- *
- * struct vring {
- * // The actual descriptors (16 bytes each)
- * struct vring_desc desc[num];
- *
- * // A ring of available descriptor heads with free-running index.
- * __u16 avail_flags;
- * __u16 avail_idx;
- * __u16 available[num];
- *
- * // Padding to the next align boundary.
- * char pad[];
- *
- * // A ring of used descriptor heads with free-running index.
- * __u16 used_flags;
- * __u16 used_idx;
- * struct vring_used_elem used[num];
- * };
- * Note: for virtio PCI, align is 4096.
- */
-
/* public interface */
void virtio_init(struct virtio_softc *sc);
-uint32_t virtio_negotiate_features(struct virtio_softc*, uint32_t);
+uint32_t virtio_negotiate_features(struct virtio_softc *, uint32_t);
size_t virtio_show_features(struct virtio_softc *sc, uint32_t features,
char *buffer, size_t len);
boolean_t virtio_has_feature(struct virtio_softc *sc, uint32_t feature);
-void virtio_set_status(struct virtio_softc *sc, int );
-#define virtio_device_reset(sc) virtio_set_status((sc), 0)
+void virtio_set_status(struct virtio_softc *sc, int);
+#define virtio_device_reset(sc) virtio_set_status((sc), 0)
uint8_t virtio_read_device_config_1(struct virtio_softc *, int);
uint16_t virtio_read_device_config_2(struct virtio_softc *, int);
@@ -196,31 +173,27 @@ void virtio_write_device_config_8(struct virtio_softc *, int, uint64_t);
struct virtqueue * virtio_alloc_vq(struct virtio_softc *sc,
int index, int maxnsegs, int size, const char *name);
-void virtio_free_vq(struct virtqueue*);
+void virtio_free_vq(struct virtqueue *);
void virtio_reset(struct virtio_softc *);
struct vq_entry * vq_alloc_entry(struct virtqueue *vq);
void vq_free_entry(struct virtqueue *vq, struct vq_entry *qe);
-int virtio_vq_intr(struct virtio_softc *);
void virtio_stop_vq_intr(struct virtqueue *);
void virtio_start_vq_intr(struct virtqueue *);
-//void virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second);
-
-
void virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
ddi_dma_cookie_t dma_cookie, unsigned int ncookies, bool write);
void virtio_ve_add_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
- bool write);
+ bool write);
void virtio_ve_set_indirect(struct vq_entry *qe, int nsegs, bool write);
void virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
- bool write);
+ bool write);
void virtio_push_chain(struct vq_entry *qe, boolean_t sync);
-void virtio_sync_vq(struct virtqueue *vq);
-
struct vq_entry * virtio_pull_chain(struct virtqueue *vq, size_t *len);
void virtio_free_chain(struct vq_entry *ve);
+void virtio_sync_vq(struct virtqueue *vq);
+
int virtio_register_ints(struct virtio_softc *sc,
struct virtio_int_handler *config_handler,
struct virtio_int_handler vq_handlers[]);
View
186 virtio_blk/vioblk.c
@@ -44,45 +44,45 @@
#include "util.h"
/* Feature bits */
-#define VIRTIO_BLK_F_BARRIER (1<<0)
-#define VIRTIO_BLK_F_SIZE_MAX (1<<1)
-#define VIRTIO_BLK_F_SEG_MAX (1<<2)
-#define VIRTIO_BLK_F_GEOMETRY (1<<4)
-#define VIRTIO_BLK_F_RO (1<<5)
-#define VIRTIO_BLK_F_BLK_SIZE (1<<6)
-#define VIRTIO_BLK_F_SCSI (1<<7)
-#define VIRTIO_BLK_F_FLUSH (1<<9)
-#define VIRTIO_BLK_F_SECTOR_MAX (1<<10)
+#define VIRTIO_BLK_F_BARRIER (1<<0)
+#define VIRTIO_BLK_F_SIZE_MAX (1<<1)
+#define VIRTIO_BLK_F_SEG_MAX (1<<2)
+#define VIRTIO_BLK_F_GEOMETRY (1<<4)
+#define VIRTIO_BLK_F_RO (1<<5)
+#define VIRTIO_BLK_F_BLK_SIZE (1<<6)
+#define VIRTIO_BLK_F_SCSI (1<<7)
+#define VIRTIO_BLK_F_FLUSH (1<<9)
+#define VIRTIO_BLK_F_SECTOR_MAX (1<<10)
/* Configuration registers */
-#define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */
-#define VIRTIO_BLK_CONFIG_SIZE_MAX 8 /* 32bit */
-#define VIRTIO_BLK_CONFIG_SEG_MAX 12 /* 32bit */
-#define VIRTIO_BLK_CONFIG_GEOMETRY_C 16 /* 16bit */
-#define VIRTIO_BLK_CONFIG_GEOMETRY_H 18 /* 8bit */
-#define VIRTIO_BLK_CONFIG_GEOMETRY_S 19 /* 8bit */
-#define VIRTIO_BLK_CONFIG_BLK_SIZE 20 /* 32bit */
-#define VIRTIO_BLK_CONFIG_SECTOR_MAX 24 /* 32bit */
+#define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */
+#define VIRTIO_BLK_CONFIG_SIZE_MAX 8 /* 32bit */
+#define VIRTIO_BLK_CONFIG_SEG_MAX 12 /* 32bit */
+#define VIRTIO_BLK_CONFIG_GEOMETRY_C 16 /* 16bit */
+#define VIRTIO_BLK_CONFIG_GEOMETRY_H 18 /* 8bit */
+#define VIRTIO_BLK_CONFIG_GEOMETRY_S 19 /* 8bit */
+#define VIRTIO_BLK_CONFIG_BLK_SIZE 20 /* 32bit */
+#define VIRTIO_BLK_CONFIG_SECTOR_MAX 24 /* 32bit */
/* Command */
-#define VIRTIO_BLK_T_IN 0
-#define VIRTIO_BLK_T_OUT 1
-#define VIRTIO_BLK_T_SCSI_CMD 2
-#define VIRTIO_BLK_T_SCSI_CMD_OUT 3
-#define VIRTIO_BLK_T_FLUSH 4
-#define VIRTIO_BLK_T_FLUSH_OUT 5
-#define VIRTIO_BLK_T_GET_ID 8
-#define VIRTIO_BLK_T_BARRIER 0x80000000
+#define VIRTIO_BLK_T_IN 0
+#define VIRTIO_BLK_T_OUT 1
+#define VIRTIO_BLK_T_SCSI_CMD 2
+#define VIRTIO_BLK_T_SCSI_CMD_OUT 3
+#define VIRTIO_BLK_T_FLUSH 4
+#define VIRTIO_BLK_T_FLUSH_OUT 5
+#define VIRTIO_BLK_T_GET_ID 8
+#define VIRTIO_BLK_T_BARRIER 0x80000000
-#define VIRTIO_BLK_ID_BYTES 20 /* devid */
+#define VIRTIO_BLK_ID_BYTES 20 /* devid */
/* Statuses */
-#define VIRTIO_BLK_S_OK 0
-#define VIRTIO_BLK_S_IOERR 1
-#define VIRTIO_BLK_S_UNSUPP 2
+#define VIRTIO_BLK_S_OK 0
+#define VIRTIO_BLK_S_IOERR 1
+#define VIRTIO_BLK_S_UNSUPP 2
-#define MAXPHYS (1024*1024)
-#define MAXINDIRECT (128)
+#define MAXPHYS (1024*1024)
+#define MAXINDIRECT (128)
/*
* Static Variables.
@@ -138,7 +138,7 @@ struct vioblk_softc {
bd_handle_t bd_h;
struct vioblk_req *sc_reqs;
struct vioblk_stats *ks_data;
- kstat_t *sc_intrstat;
+ kstat_t *sc_intrstat;
uint64_t sc_capacity;
uint64_t sc_nblks;
struct vioblk_lstats sc_stats;
@@ -211,7 +211,7 @@ static struct modlinkage modlinkage = {
ddi_device_acc_attr_t vioblk_attr = {
DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC, /* virtio is always native byte order */
+ DDI_NEVERSWAP_ACC, /* virtio is always native byte order */
DDI_STRICTORDER_ACC
};
@@ -227,7 +227,7 @@ static ddi_dma_attr_t vioblk_req_dma_attr = {
0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
- DDI_DMA_FORCE_PHYSICAL /* dma_attr_flags */
+ DDI_DMA_FORCE_PHYSICAL /* dma_attr_flags */
};
static ddi_dma_attr_t vioblk_bd_dma_attr = {
@@ -242,12 +242,12 @@ static ddi_dma_attr_t vioblk_bd_dma_attr = {
0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */
-1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
- DDI_DMA_FORCE_PHYSICAL /* dma_attr_flags */
+ DDI_DMA_FORCE_PHYSICAL /* dma_attr_flags */
};
static int
vioblk_rw_indirect(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
- uint32_t len)
+ uint32_t len)
{
struct vioblk_req *req;
struct vq_entry *ve_hdr;
@@ -256,7 +256,7 @@ vioblk_rw_indirect(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
int total_cookies, ret, write;
write = (type == VIRTIO_BLK_T_OUT ||
- type == VIRTIO_BLK_T_FLUSH_OUT) ? 1 : 0;
+ type == VIRTIO_BLK_T_FLUSH_OUT) ? 1 : 0;
ncookies = 0;
total_cookies = 2;
@@ -306,21 +306,21 @@ vioblk_rw_indirect(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
virtio_ve_set_indirect(ve_hdr, ncookies + 2, B_TRUE);
/* sending header */
- ddi_dma_sync(req->dmah, 0, sizeof(struct vioblk_req_hdr),
+ ddi_dma_sync(req->dmah, 0, sizeof (struct vioblk_req_hdr),
DDI_DMA_SYNC_FORDEV);
virtio_ve_add_buf(ve_hdr, req->dmac.dmac_laddress,
- sizeof(struct vioblk_req_hdr), B_TRUE);
+ sizeof (struct vioblk_req_hdr), B_TRUE);
/* sending payload */
- if(len > 0) {
+ if (len > 0) {
virtio_ve_add_cookie(ve_hdr, req->bd_dmah, dma_cookie,
ncookies, write ? B_TRUE : B_FALSE);
total_cookies += ncookies;
}
virtio_ve_add_buf(ve_hdr,
- req->dmac.dmac_laddress + sizeof(struct vioblk_req_hdr),
- sizeof(uint8_t), B_FALSE);
+ req->dmac.dmac_laddress + sizeof (struct vioblk_req_hdr),
+ sizeof (uint8_t), B_FALSE);
/* sending the whole chain to the device */
virtio_push_chain(ve_hdr, B_TRUE);
@@ -346,7 +346,7 @@ vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type, uint32_t len)
int total_cookies, ret, dma_bound, write;
write = (type == VIRTIO_BLK_T_OUT ||
- type == VIRTIO_BLK_T_FLUSH_OUT) ? 1 : 0;
+ type == VIRTIO_BLK_T_FLUSH_OUT) ? 1 : 0;
dma_bound = 0;
total_cookies = 2;
@@ -374,10 +374,10 @@ vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type, uint32_t len)
ve = ve_hdr;
/* sending header */
- ddi_dma_sync(req->dmah, 0, sizeof(struct vioblk_req_hdr),
- DDI_DMA_SYNC_FORDEV);
+ ddi_dma_sync(req->dmah, 0, sizeof (struct vioblk_req_hdr),
+ DDI_DMA_SYNC_FORDEV);
virtio_ve_set(ve, req->dmac.dmac_laddress,
- sizeof(struct vioblk_req_hdr), B_TRUE);
+ sizeof (struct vioblk_req_hdr), B_TRUE);
/* sending payload */
if (len > 0) {
@@ -447,8 +447,8 @@ vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type, uint32_t len)
ve = ve_next;
virtio_ve_set(ve,
- req->dmac.dmac_laddress + sizeof(struct vioblk_req_hdr),
- sizeof(uint8_t), B_FALSE);
+ req->dmac.dmac_laddress + sizeof (struct vioblk_req_hdr),
+ sizeof (uint8_t), B_FALSE);
/* sending the whole chain to the device */
virtio_push_chain(ve_hdr, B_TRUE);
@@ -542,8 +542,8 @@ vioblk_dump(void *arg, bd_xfer_t *xfer_in)
ddi_dma_unbind_handle(req->bd_dmah);
/* syncing status */
- ddi_dma_sync(req->dmah, sizeof(struct vioblk_req_hdr),
- sizeof(uint8_t), DDI_DMA_SYNC_FORKERNEL);
+ ddi_dma_sync(req->dmah, sizeof (struct vioblk_req_hdr),
+ sizeof (uint8_t), DDI_DMA_SYNC_FORKERNEL);
/* returning chain back to virtio */
virtio_free_chain(ve);
@@ -591,7 +591,8 @@ vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
mutex_enter(&sc->lock_devid);
/* non-indirect call is fine here */
- ret = vioblk_rw(sc, &sc->xfer_devid, VIRTIO_BLK_T_GET_ID, VIRTIO_BLK_ID_BYTES);
+ ret = vioblk_rw(sc, &sc->xfer_devid, VIRTIO_BLK_T_GET_ID,
+ VIRTIO_BLK_ID_BYTES);
if (ret) {
mutex_exit(&sc->lock_devid);
return (ret);
@@ -670,7 +671,8 @@ vioblk_match(dev_info_t *devinfo, ddi_acc_handle_t pconf)
"Subsystem ID does not match: %x, expected %x",
vendor, PCI_VENDOR_QUMRANET);
dev_err(devinfo, CE_NOTE,
- "This is a virtio device, but not virtio-blk, skipping");
+ "This is a virtio device, but not virtio-blk, "
+ "skipping");
return (DDI_FAILURE);
}
@@ -686,7 +688,7 @@ vioblk_show_features(struct vioblk_softc *sc, const char *prefix,
{
char buf[512];
char *bufp = buf;
- char *bufend = buf + sizeof(buf);
+ char *bufend = buf + sizeof (buf);
bufp += snprintf(bufp, bufend - bufp, prefix);
@@ -727,21 +729,23 @@ vioblk_dev_features(struct vioblk_softc *sc)
uint32_t host_features;
host_features = virtio_negotiate_features(&sc->sc_virtio,
- (VIRTIO_BLK_F_RO |
- VIRTIO_BLK_F_GEOMETRY |
- VIRTIO_BLK_F_BLK_SIZE |
- VIRTIO_BLK_F_FLUSH |
- VIRTIO_BLK_F_SEG_MAX |
- VIRTIO_BLK_F_SIZE_MAX |
- VIRTIO_F_RING_INDIRECT_DESC));
+ VIRTIO_BLK_F_RO |
+ VIRTIO_BLK_F_GEOMETRY |
+ VIRTIO_BLK_F_BLK_SIZE |
+ VIRTIO_BLK_F_FLUSH |
+ VIRTIO_BLK_F_SEG_MAX |
+ VIRTIO_BLK_F_SIZE_MAX |
+ VIRTIO_F_RING_INDIRECT_DESC);
if (!(sc->sc_virtio.sc_features & VIRTIO_BLK_F_BLK_SIZE)) {
- dev_err(sc->sc_dev, CE_NOTE, "Error while negotiating host features");
+ dev_err(sc->sc_dev, CE_NOTE,
+ "Error while negotiating host features");
return (DDI_FAILURE);
}
vioblk_show_features(sc, "Host features: ", host_features);
- vioblk_show_features(sc, "Negotiated features: ", sc->sc_virtio.sc_features);
+ vioblk_show_features(sc, "Negotiated features: ",
+ sc->sc_virtio.sc_features);
return (DDI_SUCCESS);
}
@@ -749,7 +753,8 @@ vioblk_dev_features(struct vioblk_softc *sc)
/*
* Interrupt service routine.
*/
-uint_t vioblk_int_handler(caddr_t arg1, caddr_t arg2)
+uint_t
+vioblk_int_handler(caddr_t arg1, caddr_t arg2)
{
struct virtio_softc *vsc = (void *)arg1;
struct vioblk_softc *sc = container_of(vsc,
@@ -769,8 +774,8 @@ uint_t vioblk_int_handler(caddr_t arg1, caddr_t arg2)
ddi_dma_unbind_handle(req->bd_dmah);
/* syncing status */
- ddi_dma_sync(req->dmah, sizeof(struct vioblk_req_hdr),
- sizeof(uint8_t), DDI_DMA_SYNC_FORKERNEL);
+ ddi_dma_sync(req->dmah, sizeof (struct vioblk_req_hdr),
+ sizeof (uint8_t), DDI_DMA_SYNC_FORKERNEL);
/* returning chain back to virtio */
virtio_free_chain(ve);
@@ -812,9 +817,10 @@ uint_t vioblk_int_handler(caddr_t arg1, caddr_t arg2)
return (DDI_INTR_CLAIMED);
}
-uint_t vioblk_config_handler(caddr_t arg1, caddr_t arg2)
+uint_t
+vioblk_config_handler(caddr_t arg1, caddr_t arg2)
{
- /* We want to know if we ever get here. */
+ /* We want to know if we ever get here. */
TRACE;
return (DDI_INTR_CLAIMED);
@@ -847,14 +853,14 @@ vioblk_alloc_reqs(struct vioblk_softc *sc)
qsize = sc->sc_vq->vq_num;
- sc->sc_reqs = kmem_zalloc(sizeof(struct vioblk_req) * qsize, KM_SLEEP);
+ sc->sc_reqs = kmem_zalloc(sizeof (struct vioblk_req) * qsize, KM_SLEEP);
if (!sc->sc_reqs) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to allocate the reqs buffers array");
return (ENOMEM);
}
- for (i = 0 ; i < qsize; i++) {
+ for (i = 0; i < qsize; i++) {
struct vioblk_req *req = &sc->sc_reqs[i];
if (ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
@@ -875,12 +881,14 @@ vioblk_alloc_reqs(struct vioblk_softc *sc)
goto exit;
}
- if (ddi_dma_addr_bind_handle(req->dmah, NULL, (caddr_t)&req->hdr,
- sizeof(struct vioblk_req_hdr) + sizeof(uint8_t),
+ if (ddi_dma_addr_bind_handle(req->dmah, NULL,
+ (caddr_t) &req->hdr,
+ sizeof (struct vioblk_req_hdr) + sizeof (uint8_t),
DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
NULL, &req->dmac, &req->ndmac)) {
- dev_err(sc->sc_dev, CE_WARN, "Can't bind req buffer %d", i);
+ dev_err(sc->sc_dev, CE_WARN,
+ "Can't bind req buffer %d", i);
goto exit;
}
}
@@ -901,7 +909,7 @@ vioblk_alloc_reqs(struct vioblk_softc *sc)
ddi_dma_free_handle(&req->bd_dmah);
}
- kmem_free(sc->sc_reqs, sizeof(struct vioblk_req) * qsize);
+ kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
return (ENOMEM);
}
@@ -922,7 +930,7 @@ vioblk_free_reqs(struct vioblk_softc *sc)
ddi_dma_free_handle(&req->dmah);
}
- kmem_free(sc->sc_reqs, sizeof(struct vioblk_req) * qsize);
+ kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
return (ENOMEM);
}
@@ -1011,7 +1019,8 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
/* Determine which types of interrupts supported */
ret = ddi_intr_get_supported_types(devinfo, &intr_types);
if ((ret != DDI_SUCCESS) || (!(intr_types & DDI_INTR_TYPE_FIXED))) {
- dev_err(devinfo, CE_WARN, "fixed type interrupt is not supported");
+ dev_err(devinfo, CE_WARN,
+ "fixed type interrupt is not supported");
goto exit_inttype;
}
@@ -1033,32 +1042,33 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
}
ks_data = (struct vioblk_stats *)sc->sc_intrstat->ks_data;
kstat_named_init(&ks_data->sts_rw_outofmemory,
- "total_rw_outofmemory", KSTAT_DATA_UINT64);
+ "total_rw_outofmemory", KSTAT_DATA_UINT64);
kstat_named_init(&ks_data->sts_rw_outofmappings,
- "total_rw_outofmappings", KSTAT_DATA_UINT64);
+ "total_rw_outofmappings", KSTAT_DATA_UINT64);
kstat_named_init(&ks_data->sts_rw_badoffset,
- "total_rw_badoffset", KSTAT_DATA_UINT64);
+ "total_rw_badoffset", KSTAT_DATA_UINT64);
kstat_named_init(&ks_data->sts_intr_total,
- "total_intr", KSTAT_DATA_UINT64);
+ "total_intr", KSTAT_DATA_UINT64);
kstat_named_init(&ks_data->sts_io_errors,
- "total_io_errors", KSTAT_DATA_UINT32);
+ "total_io_errors", KSTAT_DATA_UINT32);
kstat_named_init(&ks_data->sts_unsupp_errors,
- "total_unsupp_errors", KSTAT_DATA_UINT32);
+ "total_unsupp_errors", KSTAT_DATA_UINT32);
kstat_named_init(&ks_data->sts_nxio_errors,
- "total_nxio_errors", KSTAT_DATA_UINT32);
+ "total_nxio_errors", KSTAT_DATA_UINT32);
kstat_named_init(&ks_data->sts_rw_cacheflush,
- "total_rw_cacheflush", KSTAT_DATA_UINT64);
+ "total_rw_cacheflush", KSTAT_DATA_UINT64);
kstat_named_init(&ks_data->sts_rw_cookiesmax,
- "max_rw_cookies", KSTAT_DATA_UINT32);
+ "max_rw_cookies", KSTAT_DATA_UINT32);
kstat_named_init(&ks_data->sts_intr_queuemax,
- "max_intr_queue", KSTAT_DATA_UINT32);
+ "max_intr_queue", KSTAT_DATA_UINT32);
sc->ks_data = ks_data;
sc->sc_intrstat->ks_private = sc;
sc->sc_intrstat->ks_update = vioblk_ksupdate;
kstat_install(sc->sc_intrstat);
/* map BAR0 */
- ret = ddi_regs_map_setup(devinfo, 1, (caddr_t *)&sc->sc_virtio.sc_io_addr,
+ ret = ddi_regs_map_setup(devinfo, 1,
+ (caddr_t *) &sc->sc_virtio.sc_io_addr,
0, 0, &vioblk_attr, &sc->sc_virtio.sc_ioh);
if (ret != DDI_SUCCESS) {
dev_err(devinfo, CE_WARN, "unable to map bar0: [%d]", ret);
@@ -1105,14 +1115,14 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SEG_MAX) {
sc->sc_seg_max = virtio_read_device_config_4(&sc->sc_virtio,
VIRTIO_BLK_CONFIG_SEG_MAX);
- if(sc->sc_seg_max) {
+ if (sc->sc_seg_max) {
vioblk_bd_dma_attr.dma_attr_sgllen = sc->sc_seg_max;
}
}
if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SIZE_MAX) {
sc->sc_size_max = virtio_read_device_config_4(&sc->sc_virtio,
VIRTIO_BLK_CONFIG_SIZE_MAX);
- if(sc->sc_size_max) {
+ if (sc->sc_size_max) {
vioblk_bd_dma_attr.dma_attr_maxxfer = sc->sc_size_max;
}
}
View
244 virtio_net/vioif.c
@@ -67,8 +67,8 @@
#include <sys/ethernet.h>
/* Please export sys/vlan.h as part of ddi */
-//#include <sys/vlan.h>
-#define VLAN_TAGSZ 4
+// #include <sys/vlan.h>
+#define VLAN_TAGSZ 4
#include <sys/dlpi.h>
#include <sys/taskq.h>
@@ -91,31 +91,31 @@
#include "virtioreg.h"
/* Configuration registers */
-#define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
-#define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
+#define VIRTIO_NET_CONFIG_MAC 0 /* 8bit x 6byte */
+#define VIRTIO_NET_CONFIG_STATUS 6 /* 16bit */
/* Feature bits */
-#define VIRTIO_NET_F_CSUM (1 << 0) /* Host handles pkts w/ partial csum */
-#define VIRTIO_NET_F_GUEST_CSUM (1 << 1) /* Guest handles pkts w/ partial csum */
-#define VIRTIO_NET_F_MAC (1 << 5) /* Host has given MAC address. */
-#define VIRTIO_NET_F_GSO (1 << 6) /* Host handles pkts w/ any GSO type */
-#define VIRTIO_NET_F_GUEST_TSO4 (1 << 7) /* Guest can handle TSOv4 in. */
-#define VIRTIO_NET_F_GUEST_TSO6 (1 << 8) /* Guest can handle TSOv6 in. */
-#define VIRTIO_NET_F_GUEST_ECN (1 << 9) /* Guest can handle TSO[6] w/ ECN in. */
-#define VIRTIO_NET_F_GUEST_UFO (1 << 10) /* Guest can handle UFO in. */
-#define VIRTIO_NET_F_HOST_TSO4 (1 << 11) /* Host can handle TSOv4 in. */
-#define VIRTIO_NET_F_HOST_TSO6 (1 << 12) /* Host can handle TSOv6 in. */
-#define VIRTIO_NET_F_HOST_ECN (1 << 13) /* Host can handle TSO[6] w/ ECN in. */
-#define VIRTIO_NET_F_HOST_UFO (1 << 14) /* Host can handle UFO in. */
-#define VIRTIO_NET_F_MRG_RXBUF (1 << 15) /* Host can merge receive buffers. */
-#define VIRTIO_NET_F_STATUS (1 << 16) /* virtio_net_config.status available */
-#define VIRTIO_NET_F_CTRL_VQ (1 << 17) /* Control channel available */
-#define VIRTIO_NET_F_CTRL_RX (1 << 18) /* Control channel RX mode support */
-#define VIRTIO_NET_F_CTRL_VLAN (1 << 19) /* Control channel VLAN filtering */
-#define VIRTIO_NET_F_CTRL_RX_EXTRA (1 << 20) /* Extra RX mode control support */
+#define VIRTIO_NET_F_CSUM (1 << 0) /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM (1 << 1) /* Guest handles pkts w/ part csum */
+#define VIRTIO_NET_F_MAC (1 << 5) /* Host has given MAC address. */
+#define VIRTIO_NET_F_GSO (1 << 6) /* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GUEST_TSO4 (1 << 7) /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 (1 << 8) /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN (1 << 9) /* Guest can handle TSO[6] w/ ECN in */
+#define VIRTIO_NET_F_GUEST_UFO (1 << 10) /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 (1 << 11) /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 (1 << 12) /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN (1 << 13) /* Host can handle TSO[6] w/ ECN in */
+#define VIRTIO_NET_F_HOST_UFO (1 << 14) /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF (1 << 15) /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS (1 << 16) /* Config.status available */
+#define VIRTIO_NET_F_CTRL_VQ (1 << 17) /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX (1 << 18) /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN (1 << 19) /* Control channel VLAN filtering */
+#define VIRTIO_NET_F_CTRL_RX_EXTRA (1 << 20) /* Extra RX mode control support */
/* Status */
-#define VIRTIO_NET_S_LINK_UP 1
+#define VIRTIO_NET_S_LINK_UP 1
/* Packet header structure */
struct virtio_net_hdr {
@@ -133,14 +133,14 @@ struct virtio_net_hdr_mrg {
uint16_t num_buffers;
};
-#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
-#define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
-#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
-#define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
-#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
-#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* flags */
+#define VIRTIO_NET_HDR_GSO_NONE 0 /* gso_type */
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* gso_type */
+#define VIRTIO_NET_HDR_GSO_UDP 3 /* gso_type */
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* gso_type */
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* gso_type, |'ed */
-#define VIRTIO_NET_MAX_GSO_LEN (65536+ETHER_HDR_LEN)
+#define VIRTIO_NET_MAX_GSO_LEN (65536+ETHER_HDR_LEN)
/* Control virtqueue */
struct virtio_net_ctrl_cmd {
@@ -148,16 +148,16 @@ struct virtio_net_ctrl_cmd {
uint8_t command;
} __packed;
-#define VIRTIO_NET_CTRL_RX 0
-#define VIRTIO_NET_CTRL_RX_PROMISC 0
-#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+#define VIRTIO_NET_CTRL_RX 0
+#define VIRTIO_NET_CTRL_RX_PROMISC 0
+#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
-#define VIRTIO_NET_CTRL_MAC 1
-#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+#define VIRTIO_NET_CTRL_MAC 1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
-#define VIRTIO_NET_CTRL_VLAN 2
-#define VIRTIO_NET_CTRL_VLAN_ADD 0
-#define VIRTIO_NET_CTRL_VLAN_DEL 1
+#define VIRTIO_NET_CTRL_VLAN 2
+#define VIRTIO_NET_CTRL_VLAN_ADD 0
+#define VIRTIO_NET_CTRL_VLAN_DEL 1
struct virtio_net_ctrl_status {
uint8_t ack;
@@ -251,31 +251,33 @@ struct vioif_softc {
/* Tx bufs - virtio_net_hdr + a copy of the packet. */
struct vioif_buf *sc_txbufs;
- kstat_t *sc_intrstat;
+ kstat_t *sc_intrstat;
kmem_cache_t *sc_rxbuf_cache;
ulong_t sc_rxloan;
unsigned int sc_rxcopy_thresh;
};
-#define ETHERVLANMTU (ETHERMAX + 4)
+#define ETHERVLANMTU (ETHERMAX + 4)
-#define DEFAULT_MTU ETHERMTU
-#define MAX_MTU 65535
+#define DEFAULT_MTU ETHERMTU
+#define MAX_MTU 65535
-/* We win a bit on header alignment, but the host wins a lot
- * more on moving aligned buffers! Might need more thought. */
-#define VIOIF_IP_ALIGN 0
+/*
+ * We win a bit on header alignment, but the host wins a lot
+ * more on moving aligned buffers! Might need more thought.
+ */
+#define VIOIF_IP_ALIGN 0
-#define VIOIF_TX_SIZE 2048
+#define VIOIF_TX_SIZE 2048
/* Same for now. */
-#define VIOIF_RX_SIZE VIOIF_TX_SIZE
+#define VIOIF_RX_SIZE VIOIF_TX_SIZE
/* Native queue size for both rx an tx. */
-#define VIOIF_RX_QLEN 0
-#define VIOIF_TX_QLEN 0
-#define VIOIF_CTRL_QLEN 0
+#define VIOIF_RX_QLEN 0
+#define VIOIF_TX_QLEN 0
+#define VIOIF_CTRL_QLEN 0
@@ -300,18 +302,18 @@ vioif_link_state(struct vioif_softc *sc)
}
static ddi_dma_attr_t vioif_buf_dma_attr = {
- DMA_ATTR_V0, /* Version number */
- 0, /* low address */
- 0xFFFFFFFF, /* high address */
- 0xFFFFFFFF, /* counter register max */
+ DMA_ATTR_V0, /* Version number */
+ 0, /* low address */
+ 0xFFFFFFFF, /* high address */
+ 0xFFFFFFFF, /* counter register max */
VIRTIO_PAGE_SIZE, /* page alignment */
- 0x3F, /* burst sizes: 1 - 32 */
- 0x1, /* minimum transfer size */
- 0xFFFFFFFF, /* max transfer size */
- 0xFFFFFFFF, /* address register max */
- 1, /* no scatter-gather */
- 1, /* device operates on bytes */
- 0, /* attr flag: set to 0 */
+ 0x3F, /* burst sizes: 1 - 32 */
+ 0x1, /* minimum transfer size */
+ 0xFFFFFFFF, /* max transfer size */
+ 0xFFFFFFFF, /* address register max */
+ 1, /* no scatter-gather */
+ 1, /* device operates on bytes */
+ 0, /* attr flag: set to 0 */
};
static ddi_device_acc_attr_t vioif_bufattr = {
@@ -416,7 +418,8 @@ vioif_free_mems(struct vioif_softc *sc)
ddi_dma_free_handle(&buf->b_dmah);
}
- kmem_free(sc->sc_txbufs, sizeof(struct vioif_buf) * sc->sc_tx_vq->vq_num);
+ kmem_free(sc->sc_txbufs, sizeof (struct vioif_buf) *
+ sc->sc_tx_vq->vq_num);
for (i = 0; i < sc->sc_rx_vq->vq_num; i++) {
struct vioif_buf *buf = sc->sc_rxbufs[i];
@@ -424,7 +427,8 @@ vioif_free_mems(struct vioif_softc *sc)
if (buf)
kmem_cache_free(sc->sc_rxbuf_cache, buf);
}
- kmem_free(sc->sc_rxbufs, sizeof(struct vioif_buf *) * sc->sc_rx_vq->vq_num);
+ kmem_free(sc->sc_rxbufs, sizeof (struct vioif_buf *) *
+ sc->sc_rx_vq->vq_num);
}
static int
@@ -438,7 +442,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
txqsize = sc->sc_tx_vq->vq_num;
rxqsize = sc->sc_rx_vq->vq_num;
- sc->sc_txbufs = kmem_zalloc(sizeof(struct vioif_buf) * txqsize,
+ sc->sc_txbufs = kmem_zalloc(sizeof (struct vioif_buf) * txqsize,
KM_SLEEP);
if (!sc->sc_txbufs) {
dev_err(sc->sc_dev, CE_WARN,
@@ -450,21 +454,23 @@ vioif_alloc_mems(struct vioif_softc *sc)
* We don't allocate the rx vioif_buffs, just the pointers.
* There might be more vioif_buffs loaned upstream
*/
- sc->sc_rxbufs = kmem_zalloc(sizeof(struct vioif_buf *) * rxqsize, KM_SLEEP);
+ sc->sc_rxbufs = kmem_zalloc(sizeof (struct vioif_buf *) * rxqsize,
+ KM_SLEEP);
if (!sc->sc_rxbufs) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to allocate the rx buffers pointer array");
goto exit_rxalloc;
}
- for (i = 0 ; i < txqsize; i++) {
+ for (i = 0; i < txqsize; i++) {
struct vioif_buf *buf = &sc->sc_txbufs[i];
if (ddi_dma_alloc_handle(sc->sc_dev, &vioif_buf_dma_attr,
DDI_DMA_SLEEP, NULL, &buf->b_dmah)) {
dev_err(sc->sc_dev, CE_WARN,
- "Can't allocate dma handle for tx buffer %d", i);
+ "Can't allocate dma handle for tx buffer %d",
+ i);
goto exit_tx;
}
@@ -481,7 +487,8 @@ vioif_alloc_mems(struct vioif_softc *sc)
len, DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
NULL, &dmac, &nsegments)) {
- dev_err(sc->sc_dev, CE_WARN, "Can't bind tx buffer %d", i);
+ dev_err(sc->sc_dev, CE_WARN,
+ "Can't bind tx buffer %d", i);
goto exit_tx;
}
@@ -507,7 +514,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
}
exit_rxalloc:
- kmem_free(sc->sc_txbufs, sizeof(struct vioif_buf) * txqsize);
+ kmem_free(sc->sc_txbufs, sizeof (struct vioif_buf) * txqsize);
exit:
return (ENOMEM);
}
@@ -562,10 +569,10 @@ static int vioif_add_rx_single(struct vioif_softc *sc, int kmflag)
}
virtio_ve_set(ve_hdr, buf->b_paddr,
- sizeof(struct virtio_net_hdr), B_FALSE);
+ sizeof (struct virtio_net_hdr), B_FALSE);
- virtio_ve_set(ve, buf->b_paddr + sizeof(struct virtio_net_hdr),
- sc->sc_rxbuf_size - sizeof(struct virtio_net_hdr),
+ virtio_ve_set(ve, buf->b_paddr + sizeof (struct virtio_net_hdr),
+ sc->sc_rxbuf_size - sizeof (struct virtio_net_hdr),
B_FALSE);
ve_hdr->qe_next = ve;
@@ -660,17 +667,17 @@ static int vioif_rx_single(struct vioif_softc *sc)
ASSERT(ve_hdr->qe_next);
ve = ve_hdr->qe_next;
- if (len < sizeof(struct virtio_net_hdr_mrg)) {
+ if (len < sizeof (struct virtio_net_hdr_mrg)) {
cmn_err(CE_WARN, "Rx: Chain too small: %ld",
- len - sizeof(struct virtio_net_hdr_mrg));
+ len - sizeof (struct virtio_net_hdr_mrg));
virtio_free_chain(ve);
continue;
}
buf = sc->sc_rxbufs[ve_hdr->qe_index];
- ddi_dma_sync(buf->b_dmah, 0, len , DDI_DMA_SYNC_FORCPU);
+ ddi_dma_sync(buf->b_dmah, 0, len, DDI_DMA_SYNC_FORCPU);
- len -= sizeof(struct virtio_net_hdr);
+ len -= sizeof (struct virtio_net_hdr);
mp = allocb(len, 0);
if (!mp) {
@@ -679,7 +686,7 @@ static int vioif_rx_single(struct vioif_softc *sc)
break;
}
- bcopy((char *)buf->b_buf + sizeof(struct virtio_net_hdr),
+ bcopy((char *)buf->b_buf + sizeof (struct virtio_net_hdr),
mp->b_rptr, len);
mp->b_wptr = mp->b_rptr + len;
@@ -716,18 +723,20 @@ static int vioif_rx_merged(struct vioif_softc *sc)
ASSERT(buf);
- if (len < sizeof(struct virtio_net_hdr_mrg)) {
+ if (len < sizeof (struct virtio_net_hdr_mrg)) {
cmn_err(CE_WARN, "Rx: Cnain too small: %ld",
- len - sizeof(struct virtio_net_hdr_mrg));
+ len - sizeof (struct virtio_net_hdr_mrg));
virtio_free_chain(ve);
continue;
}
ddi_dma_sync(buf->b_dmah, 0, len, DDI_DMA_SYNC_FORCPU);
- len -= sizeof(struct virtio_net_hdr_mrg);
+ len -= sizeof (struct virtio_net_hdr_mrg);
- /* We copy the small packets and reuse the buffers. For
- * bigger ones, we loan the buffers upstream. */
+ /*
+ * We copy the small packets and reuse the buffers. For
+ * bigger ones, we loan the buffers upstream.
+ */
if (len < sc->sc_rxcopy_thresh) {
mp = allocb(len, 0);
if (!mp) {
@@ -736,14 +745,15 @@ static int vioif_rx_merged(struct vioif_softc *sc)
break;
}
- bcopy((char *)buf->b_buf + sizeof(struct virtio_net_hdr_mrg),
- mp->b_rptr, len);
+ bcopy((char *)buf->b_buf +
+ sizeof (struct virtio_net_hdr_mrg),
+ mp->b_rptr, len);
mp->b_wptr = mp->b_rptr + len;
} else {
mp = desballoc((char *)buf->b_buf +
- sizeof(struct virtio_net_hdr_mrg) +
+ sizeof (struct virtio_net_hdr_mrg) +
VIOIF_IP_ALIGN,
len, 0, &buf->b_frtn);
@@ -755,8 +765,10 @@ static int vioif_rx_merged(struct vioif_softc *sc)
mp->b_wptr = mp->b_rptr + len;
atomic_inc_ulong(&sc->sc_rxloan);
- /* Buffer loanded, we will have to allocte a new one
- * for this slot. */
+ /*
+	 * Buffer loaned, we will have to allocate a new one
+ * for this slot.
+ */
sc->sc_rxbufs[ve->qe_index] = NULL;
}
@@ -773,10 +785,10 @@ static int vioif_rx_merged(struct vioif_softc *sc)
static int vioif_process_rx(struct vioif_softc *sc)
{
if (sc->sc_merge) {
- return vioif_rx_merged(sc);
+ return (vioif_rx_merged(sc));
}
- return vioif_rx_single(sc);
+ return (vioif_rx_single(sc));
}
static void vioif_reclaim_used_tx(struct vioif_softc *sc)
@@ -809,8 +821,8 @@ vioif_send(struct vioif_softc *sc, mblk_t *mb)
size_t msg_size = 0;
int hdr_len;
- hdr_len = sc->sc_merge ? sizeof(struct virtio_net_hdr_mrg) :
- sizeof(struct virtio_net_hdr);
+ hdr_len = sc->sc_merge ? sizeof (struct virtio_net_hdr_mrg) :
+ sizeof (struct virtio_net_hdr);
msg_size = msgsize(mb);
if (msg_size > MAX_MTU) {
@@ -821,13 +833,13 @@ vioif_send(struct vioif_softc *sc, mblk_t *mb)
ve_hdr = vq_alloc_entry(sc->sc_tx_vq);
if (!ve_hdr) {
- /* Out of free descriptors - try later.*/
+ /* Out of free descriptors - try later. */
return (B_FALSE);
}
ve = vq_alloc_entry(sc->sc_tx_vq);
if (!ve) {
vq_free_entry(sc->sc_tx_vq, ve_hdr);
- /* Out of free descriptors - try later.*/
+ /* Out of free descriptors - try later. */
return (B_FALSE);
}
@@ -929,14 +941,16 @@ vioif_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
if (*new_mtu > MAX_MTU) {
dev_err(sc->sc_dev, CE_WARN,
- "Requested mtu (%d) out of range", *new_mtu);
+ "Requested mtu (%d) out of range",
+ *new_mtu);
return (EINVAL);
}
err = mac_maxsdu_update(sc->sc_mac_handle, *new_mtu);
if (err) {
dev_err(sc->sc_dev, CE_WARN,
- "Failed to set the requested mtu (%d)", *new_mtu);
+ "Failed to set the requested mtu (%d)",
+ *new_mtu);
return (err);
}
@@ -1033,7 +1047,8 @@ vioif_match(dev_info_t *devinfo, ddi_acc_handle_t pconf)
"Subsystem ID does not match: %x, expected %x",
vendor, PCI_VENDOR_QUMRANET);
dev_err(devinfo, CE_NOTE,
- "This is a virtio device, but not virtio-net, skipping");
+ "This is a virtio device, but not virtio-net, "
+ "skipping");
return (DDI_FAILURE);
}
@@ -1047,7 +1062,7 @@ vioif_show_features(struct vioif_softc *sc, const char *prefix,
{
char buf[512];
char *bufp = buf;
- char *bufend = buf + sizeof(buf);
+ char *bufend = buf + sizeof (buf);
bufp += snprintf(bufp, bufend - bufp, prefix);
@@ -1116,7 +1131,8 @@ vioif_dev_features(struct vioif_softc *sc)
VIRTIO_F_NOTIFY_ON_EMPTY);
vioif_show_features(sc, "Host features: ", host_features);
- vioif_show_features(sc, "Negotiated features: ", sc->sc_virtio.sc_features);
+ vioif_show_features(sc, "Negotiated features: ",
+ sc->sc_virtio.sc_features);
sc->sc_rxbuf_size = VIOIF_RX_SIZE;
if (sc->sc_virtio.sc_features & VIRTIO_NET_F_MRG_RXBUF) {
@@ -1129,7 +1145,7 @@ vioif_dev_features(struct vioif_softc *sc)
static int vioif_has_feature(struct vioif_softc *sc, uint32_t feature)
{
- return virtio_has_feature(&sc->sc_virtio, feature);
+ return (virtio_has_feature(&sc->sc_virtio, feature));
}
static void
@@ -1166,7 +1182,8 @@ vioif_get_mac(struct vioif_softc *sc)
vioif_set_mac(sc);
- dev_err(sc->sc_dev, CE_NOTE, "Generated a random MAC address: %s",
+ dev_err(sc->sc_dev, CE_NOTE,
+ "Generated a random MAC address: %s",
ether_sprintf((struct ether_addr *) sc->sc_mac));
}
}
@@ -1175,7 +1192,8 @@ vioif_get_mac(struct vioif_softc *sc)
/*
* Virtqueue interrupt handlers
*/
-uint_t vioif_rx_handler(caddr_t arg1, caddr_t arg2)
+uint_t
+vioif_rx_handler(caddr_t arg1, caddr_t arg2)
{
struct virtio_softc *vsc = (void *) arg1;
struct vioif_softc *sc = container_of(vsc,
@@ -1185,17 +1203,18 @@ uint_t vioif_rx_handler(caddr_t arg1, caddr_t arg2)
vioif_populate_rx(sc, KM_NOSLEEP);
- return DDI_INTR_CLAIMED;
+ return (DDI_INTR_CLAIMED);
}
-uint_t vioif_tx_handler(caddr_t arg1, caddr_t arg2)
+uint_t
+vioif_tx_handler(caddr_t arg1, caddr_t arg2)
{
struct virtio_softc *vsc = (void *)arg1;
struct vioif_softc *sc = container_of(vsc,
struct vioif_softc, sc_virtio);
vioif_reclaim_used_tx(sc);
- return DDI_INTR_CLAIMED;
+ return (DDI_INTR_CLAIMED);
}
static int
@@ -1211,7 +1230,7 @@ vioif_register_ints(struct vioif_softc *sc)
ret = virtio_register_ints(&sc->sc_virtio, NULL, vioif_vq_h);
- return ret;
+ return (ret);
}
static int
@@ -1306,11 +1325,13 @@ vioif_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
goto exit_cache;
}
- sc->sc_rx_vq = virtio_alloc_vq(&sc->sc_virtio, 0, VIOIF_RX_QLEN, 0, "rx");
+ sc->sc_rx_vq = virtio_alloc_vq(&sc->sc_virtio, 0,
+ VIOIF_RX_QLEN, 0, "rx");
if (!sc->sc_rx_vq)
goto exit_alloc1;
- sc->sc_tx_vq = virtio_alloc_vq(&sc->sc_virtio, 1, VIOIF_TX_QLEN, 0, "tx");
+ sc->sc_tx_vq = virtio_alloc_vq(&sc->sc_virtio, 1,
+ VIOIF_TX_QLEN, 0, "tx");
	if (!sc->sc_tx_vq)
goto exit_alloc2;
@@ -1349,7 +1370,8 @@ vioif_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
/* Pre-fill the rx ring. */
vioif_populate_rx(sc, KM_SLEEP);
- virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
+ virtio_set_status(&sc->sc_virtio,
+ VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
ret = mac_register(macp, &sc->sc_mac_handle);
@@ -1360,7 +1382,8 @@ vioif_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
ret = vioif_register_ints(sc);
if (ret) {
- dev_err(sc->sc_dev, CE_WARN, "Failed to allocate interrupt(s)!");
+ dev_err(sc->sc_dev, CE_WARN,
+ "Failed to allocate interrupt(s)!");
goto exit_ints;
}
@@ -1415,7 +1438,8 @@ vioif_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
}
if (sc->sc_rxloan) {
- cmn_err(CE_NOTE, "Some rx buffers are still upstream, Not detaching");
+ cmn_err(CE_NOTE, "Some rx buffers are still upstream, "
+ "Not detaching");
return (DDI_FAILURE);
}
Please sign in to comment.
Something went wrong with that request. Please try again.