Review comments

commit 550d53cad9f7ba037400268374857d8a69192b1d (parent 3080464)
authored by @xl0
virtio/Makefile (8 lines changed)
@@ -18,10 +18,8 @@ all: virtio
virtio: virtio.c virtiovar.h virtioreg.h
$(CC) $(CFLAGS) -c virtio.c -o virtio.o
$(CTFCONVERT) -i -L VERSION virtio.o
- $(CC) $(CFLAGS) -c util.c -o util.o
- $(CTFCONVERT) -i -L VERSION util.o
- $(LD) $(LDFLAGS) virtio.o util.o -o virtio
- $(CTFMERGE) -L VERSION -o virtio virtio.o util.o
+ $(LD) $(LDFLAGS) virtio.o -o virtio
+ $(CTFMERGE) -L VERSION -o virtio virtio.o
clean:
- rm -f virtio virtio.o util.o
+ rm -f virtio virtio.o
virtio/util.c (70 lines changed, file deleted)
@@ -1,70 +0,0 @@
-
-#include <sys/ddi.h>
-#include <sys/types.h>
-#include <sys/cmn_err.h>
-#include <sys/sunddi.h>
-#include <sys/sunndi.h>
-#include "util.h"
-
-/*
- * Add to ddi?
- */
-void
-dev_err(dev_info_t *dip, int ce, char *fmt, ...)
-{
- va_list ap;
- char buf[512];
-
- ASSERT(dip != NULL);
-
- va_start(ap, fmt);
- (void) vsnprintf(buf, sizeof (buf), fmt, ap);
- va_end(ap);
-
- cmn_err(ce, "%s%d: %s", ddi_driver_name(dip),
- ddi_get_instance(dip), buf);
-}
-
-void
-dev_panic(dev_info_t *dip, char *fmt, ...)
-{
- va_list ap;
- char buf[256];
-
- ASSERT(dip != NULL);
-
- va_start(ap, fmt);
- (void) vsnprintf(buf, sizeof (buf), fmt, ap);
- va_end(ap);
-
- panic("%s%d: %s", ddi_driver_name(dip),
- ddi_get_instance(dip), buf);
-}
-
-void
-hex_dump(char *prefix, void *addr, int len)
-{
- unsigned char *base = addr;
- char buff[256], *bptr;
- int i = 0;
- bptr = buff;
-
- cmn_err(CE_NOTE, "Dumping %d bytes starting from 0x%p",
- len, addr);
-
- while (i < len) {
- (void) sprintf(bptr, "%02x ", base[i]);
- bptr += 3;
- i++;
-
- if (!(i % 16)) {
- cmn_err(CE_NOTE, "%s: 0x%p: %s",
- prefix, (void *) (base + i - 16), buff);
- bptr = buff;
- }
- }
-
- if (i % 16)
- cmn_err(CE_NOTE, "%s: 0x%p: %s",
- prefix, (void *) (base + i - (i % 16)), buff);
-}
virtio/util.h (15 lines changed)
@@ -2,17 +2,18 @@
#include <sys/dditypes.h>
#include <sys/sysmacros.h>
+#define dev_err(dip, ce, fmt, arg...) \
+ cmn_err(ce, "%s%d: " fmt, ddi_driver_name(dip), \
+ ddi_get_instance(dip), ##arg)
+
#ifdef DEBUG
-#define dev_debug(dip, fmt, arg...) \
- dev_err(dip, fmt, ##arg)
+#define dev_debug(dip, ce, fmt, arg...) \
+ cmn_err(ce, "%s%d: " fmt, ddi_driver_name(dip), \
+ ddi_get_instance(dip), ##arg)
#else
-#define dev_debug(dip, fmt, arg...)
+#define dev_debug(dip, ce, fmt, arg...)
#endif
-void dev_err(dev_info_t *dip, int ce, char *fmt, ...);
-void dev_panic(dev_info_t *dip, char *fmt, ...);
-
-void hex_dump(char *prefix, void *addr, int len);
/*
* container_of taken from FreeBSD.
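Note: with util.c removed, dev_err() and dev_debug() are now plain variadic macros around cmn_err() that prefix the driver name and instance number. A minimal usage sketch follows (hypothetical caller, not part of this commit; assumes the usual DDI headers):

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include "util.h"

static int
example_setup(dev_info_t *dip, int instance)
{
	/* Compiled out entirely unless DEBUG is defined. */
	dev_debug(dip, CE_NOTE, "setting up instance %d", instance);

	if (instance < 0) {
		/* Expands to cmn_err(CE_WARN, "<driver><inst>: bad instance %d", ...). */
		dev_err(dip, CE_WARN, "bad instance %d", instance);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}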
virtio/virtio.c (94 lines changed)
@@ -120,7 +120,7 @@ virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
size_t
virtio_show_features(uint32_t features,
- char *buf, size_t len)
+ char *buf, size_t len)
{
char *orig_buf = buf;
char *bufend = buf + len;
@@ -194,7 +194,7 @@ virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
void
virtio_write_device_config_1(struct virtio_softc *sc,
- unsigned int index, uint8_t value)
+ unsigned int index, uint8_t value)
{
ASSERT(sc->sc_config_offset);
ddi_put8(sc->sc_ioh,
@@ -203,7 +203,7 @@ virtio_write_device_config_1(struct virtio_softc *sc,
void
virtio_write_device_config_2(struct virtio_softc *sc,
- unsigned int index, uint16_t value)
+ unsigned int index, uint16_t value)
{
ASSERT(sc->sc_config_offset);
ddi_put16(sc->sc_ioh,
@@ -213,7 +213,7 @@ virtio_write_device_config_2(struct virtio_softc *sc,
void
virtio_write_device_config_4(struct virtio_softc *sc,
- unsigned int index, uint32_t value)
+ unsigned int index, uint32_t value)
{
ASSERT(sc->sc_config_offset);
ddi_put32(sc->sc_ioh,
@@ -223,7 +223,7 @@ virtio_write_device_config_4(struct virtio_softc *sc,
void
virtio_write_device_config_8(struct virtio_softc *sc,
- unsigned int index, uint64_t value)
+ unsigned int index, uint64_t value)
{
ASSERT(sc->sc_config_offset);
ddi_put32(sc->sc_ioh,
@@ -320,9 +320,9 @@ virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
- "Failed to allocate dma handle for indirect descritpors,"
+ "Failed to allocate dma handle for indirect descriptors,"
" entry %d, vq %d", entry->qe_index,
entry->qe_queue->vq_index);
goto out_alloc_handle;
@@ -333,7 +333,7 @@ virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
(caddr_t *)&entry->qe_indirect_descs, &len,
&entry->qe_indirect_dma_acch);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to alocate dma memory for indirect descriptors,"
" entry %d, vq %d,", entry->qe_index,
@@ -349,7 +349,7 @@ virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_cookie, &ncookies);
if (ret != DDI_DMA_MAPPED) {
dev_err(sc->sc_dev, CE_WARN,
- "Failed to bind dma memory for indirect descriptrors,"
+ "Failed to bind dma memory for indirect descriptors,"
"entry %d, vq %d", entry->qe_index,
entry->qe_queue->vq_index);
goto out_bind;
@@ -433,7 +433,7 @@ virtio_alloc_vq(struct virtio_softc *sc,
const char *name)
{
int vq_size, allocsize1, allocsize2, allocsize = 0;
- int r;
+ int ret;
unsigned int ncookies;
size_t len;
struct virtqueue *vq;
@@ -471,29 +471,29 @@ virtio_alloc_vq(struct virtio_softc *sc,
allocsize = allocsize1 + allocsize2;
- r = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
+ ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
- if (r) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to allocate dma handle for vq %d", index);
goto out_alloc_handle;
}
- r = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize, &virtio_vq_devattr,
+ ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize, &virtio_vq_devattr,
DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
(caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
- if (r) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to alocate dma memory for vq %d", index);
goto out_alloc;
}
- r = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
+ ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
(caddr_t)vq->vq_vaddr, len,
DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
- if (r != DDI_DMA_MAPPED) {
+ if (ret != DDI_DMA_MAPPED) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to bind dma memory for vq %d", index);
goto out_bind;
@@ -536,8 +536,8 @@ virtio_alloc_vq(struct virtio_softc *sc,
goto out_zalloc;
}
- r = virtio_init_vq(sc, vq);
- if (r)
+ ret = virtio_init_vq(sc, vq);
+ if (ret)
goto out_init;
dev_debug(sc->sc_dev, CE_NOTE,
@@ -697,7 +697,7 @@ virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
void
virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
- ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
+ ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
{
int i;
@@ -753,8 +753,10 @@ virtio_ve_sync_desc(struct vq_entry *qe, unsigned int direction)
{
struct virtqueue *vq = qe->qe_queue;
- /* Sync the descriptor */
- /* (The descriptor array is located at the start of the vq memory) */
+ /*
+ * Sync the descriptor.
+ * The descriptor array is located at the base of the vq memory.
+ */
(void) ddi_dma_sync(vq->vq_dma_handle,
sizeof (struct vring_desc) * qe->qe_index,
sizeof (struct vring_desc),
@@ -917,9 +919,9 @@ virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
}
static int virtio_register_msi(struct virtio_softc *sc,
- struct virtio_int_handler *config_handler,
- struct virtio_int_handler vq_handlers[],
- int intr_types)
+ struct virtio_int_handler *config_handler,
+ struct virtio_int_handler vq_handlers[],
+ int intr_types)
{
int count, actual;
int int_type;
@@ -944,7 +946,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
/* Number of MSIs supported by the device. */
ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
goto out_nomsi;
}
@@ -965,7 +967,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
goto out_msi_alloc;
}
@@ -985,7 +987,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
/* Assume they are all same priority */
ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
goto out_msi_prio;
}
@@ -995,7 +997,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
vq_handlers[i].vh_func,
sc, vq_handlers[i].vh_priv);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"ddi_intr_add_handler failed");
/* Remove the handlers that succeeded. */
@@ -1012,7 +1014,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
config_handler->vh_func,
sc, config_handler->vh_priv);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"ddi_intr_add_handler failed");
/* Remove the handlers that succeeded. */
@@ -1030,7 +1032,7 @@ static int virtio_register_msi(struct virtio_softc *sc,
ret = ddi_intr_get_cap(sc->sc_intr_htable[0],
&sc->sc_intr_cap);
/* Just in case. */
- if (ret)
+ if (ret != DDI_SUCCESS)
sc->sc_intr_cap = 0;
out_add_handlers:
@@ -1080,15 +1082,14 @@ virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
return (DDI_INTR_CLAIMED);
}
-
/*
* config_handler and vq_handlers may be allocated on stack.
* Take precautions not to loose them.
*/
static int
virtio_register_intx(struct virtio_softc *sc,
- struct virtio_int_handler *config_handler,
- struct virtio_int_handler vq_handlers[])
+ struct virtio_int_handler *config_handler,
+ struct virtio_int_handler vq_handlers[])
{
int vq_handler_count;
int config_handler_count = 0;
@@ -1134,8 +1135,7 @@ virtio_register_intx(struct virtio_softc *sc,
ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
DDI_INTR_TYPE_FIXED, 0, 1, &actual,
DDI_INTR_ALLOC_NORMAL);
-
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to allocate a fixed interrupt: %d", ret);
goto out_int_alloc;
@@ -1145,14 +1145,14 @@ virtio_register_intx(struct virtio_softc *sc,
sc->sc_intr_num = 1;
ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
goto out_prio;
}
ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
virtio_intx_dispatch, sc, vhc);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
goto out_add_handlers;
}
@@ -1181,15 +1181,15 @@ virtio_register_intx(struct virtio_softc *sc,
*/
int
virtio_register_ints(struct virtio_softc *sc,
- struct virtio_int_handler *config_handler,
- struct virtio_int_handler vq_handlers[])
+ struct virtio_int_handler *config_handler,
+ struct virtio_int_handler vq_handlers[])
{
int ret;
int intr_types;
/* Determine which types of interrupts are supported */
ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
goto out_inttype;
}
@@ -1233,7 +1233,7 @@ virtio_enable_msi(struct virtio_softc *sc)
if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
ret = ddi_intr_block_enable(sc->sc_intr_htable,
sc->sc_intr_num);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to enable MSI, falling back to INTx");
goto out_enable;
@@ -1241,7 +1241,7 @@ virtio_enable_msi(struct virtio_softc *sc)
} else {
for (i = 0; i < sc->sc_intr_num; i++) {
ret = ddi_intr_enable(sc->sc_intr_htable[i]);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to enable MSI %d, "
"falling back to INTx", i);
@@ -1273,7 +1273,7 @@ virtio_enable_msi(struct virtio_softc *sc)
(uint16_t *)(sc->sc_io_addr +
VIRTIO_CONFIG_QUEUE_VECTOR));
if (check != i) {
- dev_err(sc->sc_dev, CE_WARN, "Failed to bind haneler"
+ dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler"
"for VQ %d, MSI %d. Check = %x", i, i, check);
ret = ENODEV;
goto out_bind;
@@ -1292,7 +1292,7 @@ virtio_enable_msi(struct virtio_softc *sc)
(uint16_t *)(sc->sc_io_addr +
VIRTIO_CONFIG_CONFIG_VECTOR));
if (check != i) {
- dev_err(sc->sc_dev, CE_WARN, "Failed to bind haneler "
+ dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
"for Config updates, MSI %d", i);
ret = ENODEV;
goto out_bind;
@@ -1332,7 +1332,7 @@ static int virtio_enable_intx(struct virtio_softc *sc)
int ret;
ret = ddi_intr_enable(sc->sc_intr_htable[0]);
- if (ret)
+ if (ret != DDI_SUCCESS)
dev_err(sc->sc_dev, CE_WARN,
"Failed to enable interrupt: %d", ret);
return (ret);
@@ -1388,7 +1388,7 @@ virtio_release_ints(struct virtio_softc *sc)
if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
ret = ddi_intr_block_disable(sc->sc_intr_htable,
sc->sc_intr_num);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to disable MSIs, won't be able to"
"reuse next time");
@@ -1396,7 +1396,7 @@ virtio_release_ints(struct virtio_softc *sc)
} else {
for (i = 0; i < sc->sc_intr_num; i++) {
ret = ddi_intr_disable(sc->sc_intr_htable[i]);
- if (ret) {
+ if (ret != DDI_SUCCESS) {
dev_err(sc->sc_dev, CE_WARN,
"Failed to disable interrupt %d, "
"won't be able to reuse", i);
virtio/virtiovar.h (8 lines changed)
@@ -71,13 +71,13 @@
#include <sys/cmn_err.h>
#include <sys/list.h>
+#ifdef DEBUG
#define TRACE { \
cmn_err(CE_NOTE, "^%s:%d %s()\n", __FILE__, __LINE__, __func__); \
}
-
-#define FAST_TRACE { \
- cmn_err(CE_NOTE, "^%s:%d %s()\n", __FILE__, __LINE__, __func__); \
-}
+#else
+#define TRACE
+#endif
typedef boolean_t bool;
#define __packed __attribute__((packed))
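For reference, a hypothetical call site (not from this commit): TRACE now prints its "^file:line func()" note only in DEBUG builds and expands to nothing otherwise, so it can be left in place in non-debug code.

static void
example_hook(void)
{
	TRACE;	/* cmn_err(CE_NOTE, "^%s:%d %s()\n", ...) under DEBUG; empty otherwise */
}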
virtio_blk/vioblk.c (202 lines changed)
@@ -250,7 +250,7 @@ static ddi_dma_attr_t vioblk_bd_dma_attr = {
static int
vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
- uint32_t len)
+ uint32_t len)
{
struct vioblk_req *req;
struct vq_entry *ve_hdr;
@@ -319,7 +319,7 @@ vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
*/
static int
vioblk_rw_poll(struct vioblk_softc *sc, bd_xfer_t *xfer,
- int type, uint32_t len)
+ int type, uint32_t len)
{
clock_t tmout;
int ret;
@@ -465,17 +465,17 @@ vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
(void) memset(&xfer, 0, sizeof (bd_xfer_t));
xfer.x_nblks = 1;
- if (ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
- DDI_DMA_SLEEP, NULL, &xfer.x_dmah) != DDI_SUCCESS) {
- return (DDI_FAILURE);
- }
+ ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
+ DDI_DMA_SLEEP, NULL, &xfer.x_dmah);
+ if (ret != DDI_SUCCESS)
+ goto out_alloc;
- if (ddi_dma_addr_bind_handle(xfer.x_dmah, NULL, (caddr_t)&sc->devid,
+ ret = ddi_dma_addr_bind_handle(xfer.x_dmah, NULL, (caddr_t)&sc->devid,
VIRTIO_BLK_ID_BYTES, DDI_DMA_READ | DDI_DMA_CONSISTENT,
- DDI_DMA_SLEEP, NULL, &xfer.x_dmac, &xfer.x_ndmac) !=
- DDI_DMA_MAPPED) {
- ddi_dma_free_handle(&xfer.x_dmah);
- return (DDI_FAILURE);
+ DDI_DMA_SLEEP, NULL, &xfer.x_dmac, &xfer.x_ndmac);
+ if (ret != DDI_DMA_MAPPED) {
+ ret = DDI_FAILURE;
+ goto out_map;
}
mutex_enter(&sc->lock_devid);
@@ -484,11 +484,7 @@ vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
VIRTIO_BLK_ID_BYTES);
if (ret) {
mutex_exit(&sc->lock_devid);
-
- (void) ddi_dma_unbind_handle(xfer.x_dmah);
- ddi_dma_free_handle(&xfer.x_dmah);
-
- return (ret);
+ goto out_rw;
}
/* wait for reply */
@@ -501,7 +497,7 @@ vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
/* timeout */
if (ret < 0) {
dev_err(devinfo, CE_WARN, "Cannot get devid from the device");
- return (ret);
+ return (DDI_FAILURE);
}
ret = ddi_devid_init(devinfo, DEVID_ATA_SERIAL,
@@ -520,67 +516,18 @@ vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
sc->devid[16], sc->devid[17], sc->devid[18], sc->devid[19]);
return (0);
-}
-
-static int
-vioblk_match(dev_info_t *devinfo, ddi_acc_handle_t pconf)
-{
- uint16_t vendor, device, revision, subdevice, subvendor;
-
- vendor = pci_config_get16(pconf, PCI_CONF_VENID);
- device = pci_config_get16(pconf, PCI_CONF_DEVID);
- revision = pci_config_get8(pconf, PCI_CONF_REVID);
- subvendor = pci_config_get16(pconf, PCI_CONF_SUBVENID);
- subdevice = pci_config_get16(pconf, PCI_CONF_SUBSYSID);
-
- if (vendor != PCI_VENDOR_QUMRANET) {
- dev_err(devinfo, CE_WARN,
- "Vendor ID does not match: %x, expected %x",
- vendor, PCI_VENDOR_QUMRANET);
- return (DDI_FAILURE);
- }
-
- if (device < PCI_DEV_VIRTIO_MIN || device > PCI_DEV_VIRTIO_MAX) {
- dev_err(devinfo, CE_WARN,
- "Device ID is does not match: %x, expected"
- "between %x and %x", device, PCI_DEV_VIRTIO_MIN,
- PCI_DEV_VIRTIO_MAX);
- return (DDI_FAILURE);
- }
-
- if (revision != VIRTIO_PCI_ABI_VERSION) {
- dev_err(devinfo, CE_WARN,
- "Device revision does not match: %x, expected %x",
- revision, VIRTIO_PCI_ABI_VERSION);
- return (DDI_FAILURE);
- }
-
- if (subvendor != PCI_VENDOR_QUMRANET) {
- dev_err(devinfo, CE_WARN,
- "Sub-vendor ID does not match: %x, expected %x",
- vendor, PCI_VENDOR_QUMRANET);
- return (DDI_FAILURE);
- }
-
- if (subdevice != PCI_PRODUCT_VIRTIO_BLOCK) {
- dev_err(devinfo, CE_NOTE,
- "Subsystem ID does not match: %x, expected %x",
- vendor, PCI_VENDOR_QUMRANET);
- dev_err(devinfo, CE_NOTE,
- "This is a virtio device, but not virtio-blk, "
- "skipping");
- return (DDI_FAILURE);
- }
-
- dev_debug(devinfo, CE_NOTE, "Matched successfully");
-
- return (DDI_SUCCESS);
+out_rw:
+ (void) ddi_dma_unbind_handle(xfer.x_dmah);
+out_map:
+ ddi_dma_free_handle(&xfer.x_dmah);
+out_alloc:
+ return (ret);
}
static void
vioblk_show_features(struct vioblk_softc *sc, const char *prefix,
- uint32_t features)
+ uint32_t features)
{
char buf[512];
char *bufp = buf;
@@ -739,7 +686,6 @@ vioblk_int_handler(caddr_t arg1, caddr_t arg2)
return (DDI_INTR_CLAIMED);
}
-/* ARGSUSED */
uint_t
vioblk_config_handler(caddr_t arg1, caddr_t arg2)
{
@@ -769,10 +715,31 @@ vioblk_register_ints(struct vioblk_softc *sc)
return (ret);
}
+static void
+vioblk_free_reqs(struct vioblk_softc *sc)
+{
+ int i, qsize;
+
+ qsize = sc->sc_vq->vq_num;
+
+ for (i = 0; i < qsize; i++) {
+ struct vioblk_req *req = &sc->sc_reqs[i];
+
+ if (req->ndmac)
+ (void) ddi_dma_unbind_handle(req->dmah);
+
+ if (req->dmah)
+ ddi_dma_free_handle(&req->dmah);
+ }
+
+ kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
+}
+
static int
vioblk_alloc_reqs(struct vioblk_softc *sc)
{
int i, qsize;
+ int ret;
qsize = sc->sc_vq->vq_num;
@@ -786,8 +753,9 @@ vioblk_alloc_reqs(struct vioblk_softc *sc)
for (i = 0; i < qsize; i++) {
struct vioblk_req *req = &sc->sc_reqs[i];
- if (ddi_dma_alloc_handle(sc->sc_dev, &vioblk_req_dma_attr,
- DDI_DMA_SLEEP, NULL, &req->dmah)) {
+ ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_req_dma_attr,
+ DDI_DMA_SLEEP, NULL, &req->dmah);
+ if (ret != DDI_SUCCESS){
dev_err(sc->sc_dev, CE_WARN,
"Can't allocate dma handle for req "
@@ -795,11 +763,12 @@ vioblk_alloc_reqs(struct vioblk_softc *sc)
goto exit;
}
- if (ddi_dma_addr_bind_handle(req->dmah, NULL,
+ ret = ddi_dma_addr_bind_handle(req->dmah, NULL,
(caddr_t)&req->hdr,
sizeof (struct vioblk_req_hdr) + sizeof (uint8_t),
DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
- NULL, &req->dmac, &req->ndmac)) {
+ NULL, &req->dmac, &req->ndmac);
+ if (ret != DDI_DMA_MAPPED) {
dev_err(sc->sc_dev, CE_WARN,
"Can't bind req buffer %d", i);
goto exit;
@@ -809,39 +778,10 @@ vioblk_alloc_reqs(struct vioblk_softc *sc)
return (0);
exit:
- for (i = 0; i < qsize; i++) {
- struct vioblk_req *req = &sc->sc_reqs[i];
-
- if (req->ndmac)
- (void) ddi_dma_unbind_handle(req->dmah);
-
- if (req->dmah)
- ddi_dma_free_handle(&req->dmah);
- }
-
- kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
+ vioblk_free_reqs(sc);
return (ENOMEM);
}
-static void
-vioblk_free_reqs(struct vioblk_softc *sc)
-{
- int i, qsize;
-
- qsize = sc->sc_vq->vq_num;
-
- for (i = 0; i < qsize; i++) {
- struct vioblk_req *req = &sc->sc_reqs[i];
-
- if (req->ndmac)
- (void) ddi_dma_unbind_handle(req->dmah);
-
- if (req->dmah)
- ddi_dma_free_handle(&req->dmah);
- }
-
- kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
-}
static int
vioblk_ksupdate(kstat_t *ksp, int rw)
@@ -870,7 +810,6 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
int ret, instance;
struct vioblk_softc *sc;
struct virtio_softc *vsc;
- ddi_acc_handle_t pci_conf;
struct vioblk_stats *ks_data;
instance = ddi_get_instance(devinfo);
@@ -881,12 +820,12 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
case DDI_RESUME:
case DDI_PM_RESUME:
- dev_err(devinfo, CE_WARN, "resume unsupported yet");
+ dev_err(devinfo, CE_WARN, "resume not supported yet");
ret = DDI_FAILURE;
goto exit;
default:
- dev_err(devinfo, CE_WARN, "cmd 0x%x unrecognized", cmd);
+ dev_err(devinfo, CE_WARN, "cmd 0x%x not recognized", cmd);
ret = DDI_FAILURE;
goto exit;
}
@@ -905,18 +844,6 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
sc->sc_dev = devinfo;
vsc->sc_dev = devinfo;
- ret = pci_config_setup(devinfo, &pci_conf);
- if (ret) {
- dev_err(devinfo, CE_WARN, "unable to setup PCI config handle");
- goto exit_pci_conf;
-
- }
-
- ret = vioblk_match(devinfo, pci_conf);
- pci_config_teardown(&pci_conf);
- if (ret)
- goto exit_match;
-
cv_init(&sc->cv_devid, NULL, CV_DRIVER, NULL);
mutex_init(&sc->lock_devid, NULL, MUTEX_DRIVER, NULL);
@@ -1002,18 +929,6 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
vioblk_ops.o_sync_cache = NULL;
}
-#if 0
- /* Implement me some day */
- if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_GEOMETRY) {
- int ncyls, nheads, nsects;
- ncyls = virtio_read_device_config_2(&sc->sc_virtio,
- VIRTIO_BLK_CONFIG_GEOMETRY_C);
- nheads = virtio_read_device_config_1(&sc->sc_virtio,
- VIRTIO_BLK_CONFIG_GEOMETRY_H);
- nsects = virtio_read_device_config_1(&sc->sc_virtio,
- VIRTIO_BLK_CONFIG_GEOMETRY_S);
- }
-#endif
sc->sc_seg_max = DEF_MAXINDIRECT;
/* The max number of segments (cookies) in a request */
if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SEG_MAX) {
@@ -1037,17 +952,6 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
/* The maximum (optimal) size for a cookie in a request. */
sc->sc_sector_max = DEF_MAXSECTOR;
- /*
- * The linux ****tards ****** up with the virtio spec here. See linux
- * commit 69740c8b and check the virtio spec. Just ignore it until
- * this is sorted out.
- */
-#if 0
- if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_TOPOLOGY) {
- sc->sc_sector_max = virtio_read_device_config_4(&sc->sc_virtio,
- VIRTIO_BLK_CONFIG_TOPOLOGY);
- }
-#endif
/* The maximum request size */
sc->sc_size_max = vioblk_bd_dma_attr.dma_attr_sgllen *
@@ -1058,7 +962,7 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
}
vioblk_bd_dma_attr.dma_attr_maxxfer = sc->sc_size_max;
- dev_debug(devinfo, CE_NOTE, "nblks=%d blksize=%d maxxfer=%d "
+ dev_debug(devinfo, CE_NOTE, "nblks=%lu blksize=%d maxxfer=%d "
"max data segments %d",
sc->sc_nblks, sc->sc_blk_size, sc->sc_size_max,
vioblk_bd_dma_attr.dma_attr_sgllen);
@@ -1113,13 +1017,11 @@ vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
exit_features:
virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
-exit_intrstat:
exit_map:
kstat_delete(sc->sc_intrstat);
+exit_intrstat:
mutex_destroy(&sc->lock_devid);
cv_destroy(&sc->cv_devid);
-exit_match:
-exit_pci_conf:
kmem_free(sc, sizeof (struct vioblk_softc));
exit:
return (ret);
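The vioblk_devid_init() rework above replaces the duplicated inline unwinding with goto labels that release only what was actually set up, and the request-allocation error path now reuses vioblk_free_reqs() the same way. A generic sketch of that unwind style, with hypothetical names but the same DDI calls used in the diff:

#include <sys/ddi.h>
#include <sys/sunddi.h>

static int
example_bind_buf(dev_info_t *dip, ddi_dma_attr_t *attrp, caddr_t buf,
    size_t len, ddi_dma_handle_t *dmahp, ddi_dma_cookie_t *cookiep,
    uint_t *ncookiesp)
{
	int ret;

	ret = ddi_dma_alloc_handle(dip, attrp, DDI_DMA_SLEEP, NULL, dmahp);
	if (ret != DDI_SUCCESS)
		goto out_alloc;

	ret = ddi_dma_addr_bind_handle(*dmahp, NULL, buf, len,
	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    cookiep, ncookiesp);
	if (ret != DDI_DMA_MAPPED) {
		ret = DDI_FAILURE;
		goto out_bind;
	}

	return (DDI_SUCCESS);

	/* Unwind in reverse order; each label frees exactly one resource. */
out_bind:
	ddi_dma_free_handle(dmahp);
out_alloc:
	return (ret);
}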