Permalink
Switch branches/tags
Nothing to show
Find file Copy path
Fetching contributors…
Cannot retrieve contributors at this time
16325 lines (16235 sloc) 475 KB
From d69504bae22e23771b7e197b24864886280cf4d0 Mon Sep 17 00:00:00 2001
From: Udit kumar agarwal <dev.madaari@gmail.com>
Date: Tue, 3 Jul 2018 14:37:42 +0530
Subject: [PATCH] Initial MMCCAM port for RTEMS
- Currently the driver is able to detect and initialize all sorts of SD/MMC/SDIO cards. It's able to read and modify registers.
- However, several bugs exist in the part responsible for mounting the partitions using RTEMS media server.
- More details on this can be found here: http://uditagarwal.in/index.php/2018/08/01/rtems-sdio-driver-current-progress/
- This patch has been tested with rtems-libbsd commit id: 137250239e9e0b244eb924928e8d1ec7996dcfef
- For details on how to use this patch, visit here: http://uditagarwal.in/index.php/2018/08/03/gsoc-2018-final-report/#use_sdio_code
---
buildset/default-mmccam.ini | 13 +
freebsd-org | 2 +-
freebsd/sys/arm/include/md_var.h | 76 +
freebsd/sys/arm/ti/ti_sdhci.c | 29 +-
freebsd/sys/cam/cam.h | 2 +-
freebsd/sys/cam/cam_ccb.h | 87 +-
freebsd/sys/cam/cam_compat.c | 426 ++
freebsd/sys/cam/cam_compat.h | 223 +
freebsd/sys/cam/cam_debug.h | 2 +-
freebsd/sys/cam/cam_periph.c | 2018 +++++++
freebsd/sys/cam/cam_periph.h | 2 +-
freebsd/sys/cam/cam_queue.c | 399 ++
freebsd/sys/cam/cam_queue.h | 291 ++
freebsd/sys/cam/cam_sim.h | 10 +-
freebsd/sys/cam/cam_xpt.c | 5491 ++++++++++++++++++++
freebsd/sys/cam/cam_xpt.h | 13 +
freebsd/sys/cam/cam_xpt_sim.h | 4 -
freebsd/sys/cam/mmc/mmc.h | 106 +
freebsd/sys/cam/mmc/mmc_all.h | 70 +
freebsd/sys/cam/mmc/mmc_bus.h | 5 +
freebsd/sys/cam/mmc/mmc_da.c | 2187 ++++++++
freebsd/sys/cam/mmc/mmc_xpt.c | 1110 ++++
freebsd/sys/cam/scsi/scsi_all.c | 2 +-
freebsd/sys/dev/mmc/bridge.h | 2 +
freebsd/sys/dev/mmc/mmcbrvar.h | 1 -
freebsd/sys/dev/mmc/mmcreg.h | 102 +
freebsd/sys/dev/sdhci/fsl_sdhci.c | 998 ++++
freebsd/sys/dev/sdhci/sdhci.c | 566 +-
freebsd/sys/dev/sdhci/sdhci.h | 13 +
freebsd/sys/sys/devicestat.h | 206 +
libbsd.py | 45 +
rtemsbsd/include/bsp/nexus-devices.h | 6 +-
rtemsbsd/include/cam/cam_queue.h | 294 +-
rtemsbsd/include/cam/cam_xpt_internal.h | 212 +-
rtemsbsd/include/cam/cam_xpt_periph.h | 260 +-
.../include/machine/rtems-bsd-kernel-namespace.h | 3 -
rtemsbsd/include/rtems/bsd/local/opt_cam.h | 6 +
rtemsbsd/include/rtems/bsd/local/opt_mmccam.h | 4 +
rtemsbsd/rtems/rtems-kernel-cam.c | 119 +-
39 files changed, 15347 insertions(+), 58 deletions(-)
create mode 100644 buildset/default-mmccam.ini
create mode 100644 freebsd/sys/arm/include/md_var.h
create mode 100644 freebsd/sys/cam/cam_compat.c
create mode 100644 freebsd/sys/cam/cam_compat.h
create mode 100644 freebsd/sys/cam/cam_periph.c
create mode 100644 freebsd/sys/cam/cam_queue.c
create mode 100644 freebsd/sys/cam/cam_queue.h
create mode 100644 freebsd/sys/cam/cam_xpt.c
create mode 100644 freebsd/sys/cam/mmc/mmc.h
create mode 100644 freebsd/sys/cam/mmc/mmc_all.h
create mode 100644 freebsd/sys/cam/mmc/mmc_bus.h
create mode 100644 freebsd/sys/cam/mmc/mmc_da.c
create mode 100644 freebsd/sys/cam/mmc/mmc_xpt.c
create mode 100644 freebsd/sys/dev/sdhci/fsl_sdhci.c
create mode 100644 freebsd/sys/sys/devicestat.h
create mode 100644 rtemsbsd/include/rtems/bsd/local/opt_mmccam.h
diff --git a/buildset/default-mmccam.ini b/buildset/default-mmccam.ini
new file mode 100644
index 0000000..2cdce6a
--- /dev/null
+++ b/buildset/default-mmccam.ini
@@ -0,0 +1,13 @@
+#
+# Default configuration. Contains most features except for some big or slow ones
+# like WiFi or IPSec.
+#
+# To all developers: Please always add all modules to this file and mark them
+# as explicitly "off" if they are not used.
+#
+
+[general]
+name = default-mmccam
+extends = default.ini
+[modules]
+mmccam = on
diff --git a/freebsd-org b/freebsd-org
index 642b174..f2ecfd4 160000
--- a/freebsd-org
+++ b/freebsd-org
@@ -1 +1 @@
-Subproject commit 642b174daddbd0efd9bb5f242c43f4ab4db6869f
+Subproject commit f2ecfd4517d49677df7d782b83c1b305dabe116a
diff --git a/freebsd/sys/arm/include/md_var.h b/freebsd/sys/arm/include/md_var.h
new file mode 100644
index 0000000..642124d
--- /dev/null
+++ b/freebsd/sys/arm/include/md_var.h
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 1995 Bruce D. Evans.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/md_var.h,v 1.40 2001/07/12
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define _MACHINE_MD_VAR_H_
+
+extern long Maxmem;
+extern char sigcode[];
+extern int szsigcode;
+extern uint32_t *vm_page_dump;
+extern int vm_page_dump_size;
+
+extern int (*_arm_memcpy)(void *, void *, int, int);
+extern int (*_arm_bzero)(void *, int, int);
+
+extern int _min_memcpy_size;
+extern int _min_bzero_size;
+
+#define DST_IS_USER 0x1
+#define SRC_IS_USER 0x2
+#define IS_PHYSICAL 0x4
+
+enum cpu_class {
+ CPU_CLASS_NONE,
+ CPU_CLASS_ARM9TDMI,
+ CPU_CLASS_ARM9ES,
+ CPU_CLASS_ARM9EJS,
+ CPU_CLASS_ARM10E,
+ CPU_CLASS_ARM10EJ,
+ CPU_CLASS_CORTEXA,
+ CPU_CLASS_KRAIT,
+ CPU_CLASS_XSCALE,
+ CPU_CLASS_ARM11J,
+ CPU_CLASS_MARVELL
+};
+extern enum cpu_class cpu_class;
+
+struct dumperinfo;
+extern int busdma_swi_pending;
+void busdma_swi(void);
+void dump_add_page(vm_paddr_t);
+void dump_drop_page(vm_paddr_t);
+int minidumpsys(struct dumperinfo *);
+
+extern uint32_t initial_fpscr;
+
+#endif /* !_MACHINE_MD_VAR_H_ */
diff --git a/freebsd/sys/arm/ti/ti_sdhci.c b/freebsd/sys/arm/ti/ti_sdhci.c
index 94096fd..58d178a 100644
--- a/freebsd/sys/arm/ti/ti_sdhci.c
+++ b/freebsd/sys/arm/ti/ti_sdhci.c
@@ -41,6 +41,8 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -62,6 +64,8 @@ __FBSDID("$FreeBSD$");
#include <arm/ti/ti_hwmods.h>
#include <rtems/bsd/local/gpio_if.h>
+#include <rtems/bsd/local/opt_mmccam.h>
+
struct ti_sdhci_softc {
device_t dev;
struct sdhci_fdt_gpio * gpio;
@@ -124,6 +128,11 @@ static struct ofw_compat_data compat_data[] = {
#define MMCHS_SD_CAPA_VS30 (1 << 25)
#define MMCHS_SD_CAPA_VS33 (1 << 24)
+/* Forward declarations, CAM-related */
+// static void ti_sdhci_cam_poll(struct cam_sim *);
+// static void ti_sdhci_cam_action(struct cam_sim *, union ccb *);
+// static int ti_sdhci_cam_settran_settings(struct ti_sdhci_softc *sc, union ccb *);
+
static inline uint32_t
ti_mmchs_read_4(struct ti_sdhci_softc *sc, bus_size_t off)
{
@@ -243,6 +252,22 @@ ti_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off,
struct ti_sdhci_softc *sc = device_get_softc(dev);
uint32_t val32;
+#ifdef MMCCAM
+ uint32_t newval32;
+ if (off == SDHCI_HOST_CONTROL) {
+ val32 = ti_mmchs_read_4(sc, MMCHS_CON);
+ newval32 = val32;
+ if (val & SDHCI_CTRL_8BITBUS) {
+ device_printf(dev, "Custom-enabling 8-bit bus\n");
+ newval32 |= MMCHS_CON_DW8;
+ } else {
+ device_printf(dev, "Custom-disabling 8-bit bus\n");
+ newval32 &= ~MMCHS_CON_DW8;
+ }
+ if (newval32 != val32)
+ ti_mmchs_write_4(sc, MMCHS_CON, newval32);
+ }
+#endif
val32 = RD4(sc, off & ~3);
val32 &= ~(0xff << (off & 3) * 8);
val32 |= (val << (off & 3) * 8);
@@ -661,7 +686,6 @@ ti_sdhci_attach(device_t dev)
bus_generic_attach(dev);
sdhci_start_slot(&sc->slot);
-
return (0);
fail:
@@ -732,4 +756,7 @@ static driver_t ti_sdhci_driver = {
DRIVER_MODULE(sdhci_ti, simplebus, ti_sdhci_driver, ti_sdhci_devclass, NULL,
NULL);
MODULE_DEPEND(sdhci_ti, sdhci, 1, 1, 1);
+
+#ifndef MMCCAM
MMC_DECLARE_BRIDGE(sdhci_ti);
+#endif
diff --git a/freebsd/sys/cam/cam.h b/freebsd/sys/cam/cam.h
index 23feb50..b39c625 100644
--- a/freebsd/sys/cam/cam.h
+++ b/freebsd/sys/cam/cam.h
@@ -33,7 +33,7 @@
#ifdef _KERNEL
#ifndef __rtems__
-#include <opt_cam.h>
+#include <rtems/bsd/local/opt_cam.h>
#else /* __rtems__ */
#include <rtems/bsd/local/opt_cam.h>
#endif /* __rtems__ */
diff --git a/freebsd/sys/cam/cam_ccb.h b/freebsd/sys/cam/cam_ccb.h
index 99249f4..9f9b0cc 100644
--- a/freebsd/sys/cam/cam_ccb.h
+++ b/freebsd/sys/cam/cam_ccb.h
@@ -42,6 +42,7 @@
#include <cam/scsi/scsi_all.h>
#include <cam/ata/ata_all.h>
#include <cam/nvme/nvme_all.h>
+#include <cam/mmc/mmc_all.h>
#ifdef __rtems__
#include <rtems/blkdev.h>
#endif /* __rtems__ */
@@ -211,10 +212,10 @@ typedef enum {
XPT_NVME_IO = 0x1c | XPT_FC_DEV_QUEUED,
/* Execiute the requestred NVMe I/O operation */
- XPT_MMCSD_IO = 0x1d | XPT_FC_DEV_QUEUED,
+ XPT_MMC_IO = 0x1d | XPT_FC_DEV_QUEUED,
/* Placeholder for MMC / SD / SDIO I/O stuff */
- XPT_SCAN_TGT = 0x1E | XPT_FC_QUEUED | XPT_FC_USER_CCB
+ XPT_SCAN_TGT = 0x1e | XPT_FC_QUEUED | XPT_FC_USER_CCB
| XPT_FC_XPT_ONLY,
/* Scan Target */
@@ -270,6 +271,7 @@ typedef enum {
PROTO_SATAPM, /* SATA Port Multiplier */
PROTO_SEMB, /* SATA Enclosure Management Bridge */
PROTO_NVME, /* NVME */
+ PROTO_MMCSD, /* MMC, SD, SDIO */
} cam_proto;
typedef enum {
@@ -286,6 +288,7 @@ typedef enum {
XPORT_ISCSI, /* iSCSI */
XPORT_SRP, /* SCSI RDMA Protocol */
XPORT_NVME, /* NVMe over PCIe */
+ XPORT_MMCSD, /* MMC, SD, SDIO card */
} cam_xport;
#define XPORT_IS_NVME(t) ((t) == XPORT_NVME)
@@ -332,36 +335,27 @@ typedef struct {
} ccb_qos_area;
struct ccb_hdr {
-#ifndef __rtems__
cam_pinfo pinfo; /* Info for priority scheduling */
camq_entry xpt_links; /* For chaining in the XPT layer */
camq_entry sim_links; /* For chaining in the SIM layer */
camq_entry periph_links; /* For chaining in the type driver */
-#else /* __rtems__ */
struct cam_sim *sim;
-#endif /* __rtems__ */
u_int32_t retry_count;
void (*cbfcnp)(struct cam_periph *, union ccb *);
/* Callback on completion function */
xpt_opcode func_code; /* XPT function code */
u_int32_t status; /* Status returned by CAM subsystem */
-#ifndef __rtems__
struct cam_path *path; /* Compiled path for this ccb */
path_id_t path_id; /* Path ID for the request */
-#endif /* __rtems__ */
target_id_t target_id; /* Target device ID */
lun_id_t target_lun; /* Target LUN number */
u_int32_t flags; /* ccb_flags */
u_int32_t xflags; /* Extended flags */
-#ifndef __rtems__
ccb_ppriv_area periph_priv;
ccb_spriv_area sim_priv;
ccb_qos_area qos;
-#endif /* __rtems__ */
u_int32_t timeout; /* Hard timeout value in mseconds */
-#ifndef __rtems__
struct timeval softtimeout; /* Soft timeout value in sec + usec */
-#endif /* __rtems__ */
};
/* Get Device Information CCB */
@@ -792,6 +786,16 @@ struct ccb_ataio {
uint32_t unused;
};
+/*
+ * MMC I/O Request CCB used for the XPT_MMC_IO function code.
+ */
+struct ccb_mmcio {
+ struct ccb_hdr ccb_h;
+ union ccb *next_ccb; /* Ptr for next CCB for action */
+ struct mmc_command cmd;
+ struct mmc_command stop;
+};
+
struct ccb_accept_tio {
struct ccb_hdr ccb_h;
cdb_t cdb_io; /* Union for CDB bytes/pointer */
@@ -1028,7 +1032,28 @@ struct ccb_trans_settings_nvme
u_int max_xfer; /* Max transfer size (0 -> unlimited */
u_int caps;
};
-
+
+#include <cam/mmc/mmc_bus.h>
+struct ccb_trans_settings_mmc {
+ struct mmc_ios ios;
+#define MMC_CLK (1 << 1)
+#define MMC_VDD (1 << 2)
+#define MMC_CS (1 << 3)
+#define MMC_BW (1 << 4)
+#define MMC_PM (1 << 5)
+#define MMC_BT (1 << 6)
+#define MMC_BM (1 << 7)
+ uint32_t ios_valid;
+/* The following is used only for GET_TRAN_SETTINGS */
+ uint32_t host_ocr;
+ int host_f_min;
+ int host_f_max;
+#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can do 4-bit data transfers */
+#define MMC_CAP_8_BIT_DATA (1 << 1) /* Can do 8-bit data transfers */
+#define MMC_CAP_HSPEED (1 << 2) /* Can do High Speed transfers */
+ uint32_t host_caps;
+};
+
/* Get/Set transfer rate/width/disconnection/tag queueing settings */
struct ccb_trans_settings {
struct ccb_hdr ccb_h;
@@ -1042,6 +1067,7 @@ struct ccb_trans_settings {
struct ccb_trans_settings_ata ata;
struct ccb_trans_settings_scsi scsi;
struct ccb_trans_settings_nvme nvme;
+ struct ccb_trans_settings_mmc mmc;
} proto_specific;
union {
u_int valid; /* Which fields to honor */
@@ -1250,6 +1276,7 @@ struct ccb_dev_advinfo {
#define CDAI_TYPE_PHYS_PATH 3
#define CDAI_TYPE_RCAPLONG 4
#define CDAI_TYPE_EXT_INQ 5
+#define CDAI_TYPE_MMC_PARAMS 8
off_t bufsiz; /* IN: Size of external buffer */
#define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */
off_t provsiz; /* OUT: Size required/used */
@@ -1307,6 +1334,7 @@ union ccb {
struct ccb_dev_advinfo cdai;
struct ccb_async casync;
struct ccb_nvmeio nvmeio;
+ struct ccb_mmcio mmcio;
};
#define CCB_CLEAR_ALL_EXCEPT_HDR(ccbp) \
@@ -1350,6 +1378,13 @@ cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
uint32_t timeout);
static __inline void
+cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
+ uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags,
+ struct mmc_data *mmc_d,
+ uint32_t timeout);
+
+static __inline void
cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
void (*cbfcnp)(struct cam_periph *, union ccb *),
u_int32_t flags, u_int8_t tag_action,
@@ -1438,6 +1473,34 @@ cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries,
}
static __inline void
+cam_fill_mmcio(struct ccb_mmcio *mmcio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags,
+ uint32_t mmc_opcode, uint32_t mmc_arg, uint32_t mmc_flags,
+ struct mmc_data *mmc_d,
+ uint32_t timeout)
+{
+ mmcio->ccb_h.func_code = XPT_MMC_IO;
+ mmcio->ccb_h.flags = flags;
+ mmcio->ccb_h.retry_count = retries;
+ mmcio->ccb_h.cbfcnp = cbfcnp;
+ mmcio->ccb_h.timeout = timeout;
+ mmcio->cmd.opcode = mmc_opcode;
+ mmcio->cmd.arg = mmc_arg;
+ mmcio->cmd.flags = mmc_flags;
+ mmcio->stop.opcode = 0;
+ mmcio->stop.arg = 0;
+ mmcio->stop.flags = 0;
+ if (mmc_d != NULL) {
+ mmcio->cmd.data = mmc_d;
+ } else
+ mmcio->cmd.data = NULL;
+ mmcio->cmd.resp[0] = 0;
+ mmcio->cmd.resp[1] = 0;
+ mmcio->cmd.resp[2] = 0;
+ mmcio->cmd.resp[3] = 0;
+}
+
+static __inline void
cam_set_ccbstatus(union ccb *ccb, cam_status status)
{
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
diff --git a/freebsd/sys/cam/cam_compat.c b/freebsd/sys/cam/cam_compat.c
new file mode 100644
index 0000000..7a77631
--- /dev/null
+++ b/freebsd/sys/cam/cam_compat.c
@@ -0,0 +1,426 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * CAM ioctl compatibility shims
+ *
+ * Copyright (c) 2013 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/kthread.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_xpt.h>
+#include <cam/cam_compat.h>
+#include <cam/cam_periph.h>
+#ifndef __rtems__
+#include <cam/scsi/scsi_pass.h>
+#else
+#define CAMIOCOMMAND _IOWR(CAM_VERSION, 2, union ccb)
+#define CAMGETPASSTHRU _IOWR(CAM_VERSION, 3, union ccb)
+#endif
+#include <rtems/bsd/local/opt_cam.h>
+
+static int cam_compat_handle_0x17(struct cdev *dev, u_long cmd, caddr_t addr,
+ int flag, struct thread *td, d_ioctl_t *cbfnp);
+static int cam_compat_handle_0x18(struct cdev *dev, u_long cmd, caddr_t addr,
+ int flag, struct thread *td, d_ioctl_t *cbfnp);
+static int cam_compat_translate_dev_match_0x18(union ccb *ccb);
+
+int
+cam_compat_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td, d_ioctl_t *cbfnp)
+{
+ int error;
+
+ switch (cmd) {
+ case CAMIOCOMMAND_0x16:
+ {
+ struct ccb_hdr_0x17 *hdr17;
+
+ hdr17 = (struct ccb_hdr_0x17 *)addr;
+ if (hdr17->flags & CAM_SG_LIST_PHYS_0x16) {
+ hdr17->flags &= ~CAM_SG_LIST_PHYS_0x16;
+ hdr17->flags |= CAM_DATA_SG_PADDR;
+ }
+ if (hdr17->flags & CAM_DATA_PHYS_0x16) {
+ hdr17->flags &= ~CAM_DATA_PHYS_0x16;
+ hdr17->flags |= CAM_DATA_PADDR;
+ }
+ if (hdr17->flags & CAM_SCATTER_VALID_0x16) {
+ hdr17->flags &= CAM_SCATTER_VALID_0x16;
+ hdr17->flags |= CAM_DATA_SG;
+ }
+ cmd = CAMIOCOMMAND;
+ error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+ break;
+ }
+ case CAMGETPASSTHRU_0x16:
+ cmd = CAMGETPASSTHRU;
+ error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+ break;
+ case CAMIOCOMMAND_0x17:
+ cmd = CAMIOCOMMAND;
+ error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+ break;
+ case CAMGETPASSTHRU_0x17:
+ cmd = CAMGETPASSTHRU;
+ error = cam_compat_handle_0x17(dev, cmd, addr, flag, td, cbfnp);
+ break;
+ case CAMIOCOMMAND_0x18:
+ cmd = CAMIOCOMMAND;
+ error = cam_compat_handle_0x18(dev, cmd, addr, flag, td, cbfnp);
+ break;
+ case CAMGETPASSTHRU_0x18:
+ cmd = CAMGETPASSTHRU;
+ error = cam_compat_handle_0x18(dev, cmd, addr, flag, td, cbfnp);
+ break;
+ default:
+ error = ENOTTY;
+ }
+
+ return (error);
+}
+
+static int
+cam_compat_handle_0x17(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td, d_ioctl_t *cbfnp)
+{
+ union ccb *ccb;
+ struct ccb_hdr *hdr;
+ struct ccb_hdr_0x17 *hdr17;
+ uint8_t *ccbb, *ccbb17;
+ u_int error;
+
+ hdr17 = (struct ccb_hdr_0x17 *)addr;
+ ccb = xpt_alloc_ccb();
+ hdr = &ccb->ccb_h;
+
+ hdr->pinfo = hdr17->pinfo;
+ hdr->xpt_links = hdr17->xpt_links;
+ hdr->sim_links = hdr17->sim_links;
+ hdr->periph_links = hdr17->periph_links;
+ hdr->retry_count = hdr17->retry_count;
+ hdr->cbfcnp = hdr17->cbfcnp;
+ hdr->func_code = hdr17->func_code;
+ hdr->status = hdr17->status;
+ hdr->path = hdr17->path;
+ hdr->path_id = hdr17->path_id;
+ hdr->target_id = hdr17->target_id;
+ hdr->target_lun = hdr17->target_lun;
+ hdr->flags = hdr17->flags;
+ hdr->xflags = 0;
+ hdr->periph_priv = hdr17->periph_priv;
+ hdr->sim_priv = hdr17->sim_priv;
+ hdr->timeout = hdr17->timeout;
+ hdr->softtimeout.tv_sec = 0;
+ hdr->softtimeout.tv_usec = 0;
+
+ ccbb = (uint8_t *)&hdr[1];
+ ccbb17 = (uint8_t *)&hdr17[1];
+ if (ccb->ccb_h.func_code == XPT_SET_TRAN_SETTINGS) {
+ struct ccb_trans_settings *cts;
+ struct ccb_trans_settings_0x17 *cts17;
+
+ cts = &ccb->cts;
+ cts17 = (struct ccb_trans_settings_0x17 *)hdr17;
+ cts->type = cts17->type;
+ cts->protocol = cts17->protocol;
+ cts->protocol_version = cts17->protocol_version;
+ cts->transport = cts17->transport;
+ cts->transport_version = cts17->transport_version;
+ bcopy(&cts17->proto_specific, &cts->proto_specific,
+ sizeof(cts17->proto_specific));
+ bcopy(&cts17->xport_specific, &cts->xport_specific,
+ sizeof(cts17->xport_specific));
+ } else {
+ bcopy(ccbb17, ccbb, CAM_0X17_DATA_LEN);
+ }
+
+ error = (cbfnp)(dev, cmd, (caddr_t)ccb, flag, td);
+
+ hdr17->pinfo = hdr->pinfo;
+ hdr17->xpt_links = hdr->xpt_links;
+ hdr17->sim_links = hdr->sim_links;
+ hdr17->periph_links = hdr->periph_links;
+ hdr17->retry_count = hdr->retry_count;
+ hdr17->cbfcnp = hdr->cbfcnp;
+ hdr17->func_code = hdr->func_code;
+ hdr17->status = hdr->status;
+ hdr17->path = hdr->path;
+ hdr17->path_id = hdr->path_id;
+ hdr17->target_id = hdr->target_id;
+ hdr17->target_lun = hdr->target_lun;
+ hdr17->flags = hdr->flags;
+ hdr17->periph_priv = hdr->periph_priv;
+ hdr17->sim_priv = hdr->sim_priv;
+ hdr17->timeout = hdr->timeout;
+
+ if (ccb->ccb_h.func_code == XPT_PATH_INQ) {
+ struct ccb_pathinq *cpi;
+ struct ccb_pathinq_0x17 *cpi17;
+
+ /* The PATH_INQ only needs special handling on the way out */
+ cpi = &ccb->cpi;
+ cpi17 = (struct ccb_pathinq_0x17 *)hdr17;
+ cpi17->version_num = cpi->version_num;
+ cpi17->hba_inquiry = cpi->hba_inquiry;
+ cpi17->target_sprt = (u_int8_t)cpi->target_sprt;
+ cpi17->hba_misc = (u_int8_t)cpi->hba_misc;
+ cpi17->hba_eng_cnt = cpi->hba_eng_cnt;
+ bcopy(&cpi->vuhba_flags[0], &cpi17->vuhba_flags[0], VUHBALEN);
+ cpi17->max_target = cpi->max_target;
+ cpi17->max_lun = cpi->max_lun;
+ cpi17->async_flags = cpi->async_flags;
+ cpi17->hpath_id = cpi->hpath_id;
+ cpi17->initiator_id = cpi->initiator_id;
+ bcopy(&cpi->sim_vid[0], &cpi17->sim_vid[0], SIM_IDLEN);
+ bcopy(&cpi->hba_vid[0], &cpi17->hba_vid[0], HBA_IDLEN);
+ bcopy(&cpi->dev_name[0], &cpi17->dev_name[0], DEV_IDLEN);
+ cpi17->unit_number = cpi->unit_number;
+ cpi17->bus_id = cpi->bus_id;
+ cpi17->base_transfer_speed = cpi->base_transfer_speed;
+ cpi17->protocol = cpi->protocol;
+ cpi17->protocol_version = cpi->protocol_version;
+ cpi17->transport = cpi->transport;
+ cpi17->transport_version = cpi->transport_version;
+ bcopy(&cpi->xport_specific, &cpi17->xport_specific,
+ PATHINQ_SETTINGS_SIZE);
+ cpi17->maxio = cpi->maxio;
+ cpi17->hba_vendor = cpi->hba_vendor;
+ cpi17->hba_device = cpi->hba_device;
+ cpi17->hba_subvendor = cpi->hba_subvendor;
+ cpi17->hba_subdevice = cpi->hba_subdevice;
+ } else if (ccb->ccb_h.func_code == XPT_GET_TRAN_SETTINGS) {
+ struct ccb_trans_settings *cts;
+ struct ccb_trans_settings_0x17 *cts17;
+
+ cts = &ccb->cts;
+ cts17 = (struct ccb_trans_settings_0x17 *)hdr17;
+ cts17->type = cts->type;
+ cts17->protocol = cts->protocol;
+ cts17->protocol_version = cts->protocol_version;
+ cts17->transport = cts->transport;
+ cts17->transport_version = cts->transport_version;
+ bcopy(&cts->proto_specific, &cts17->proto_specific,
+ sizeof(cts17->proto_specific));
+ bcopy(&cts->xport_specific, &cts17->xport_specific,
+ sizeof(cts17->xport_specific));
+ } else if (ccb->ccb_h.func_code == XPT_DEV_MATCH) {
+ /* Copy the rest of the header over */
+ bcopy(ccbb, ccbb17, CAM_0X17_DATA_LEN);
+
+ cam_compat_translate_dev_match_0x18(ccb);
+ } else {
+ bcopy(ccbb, ccbb17, CAM_0X17_DATA_LEN);
+ }
+
+ xpt_free_ccb(ccb);
+
+ return (error);
+}
+
+static int
+cam_compat_handle_0x18(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td, d_ioctl_t *cbfnp)
+{
+ union ccb *ccb;
+ struct ccb_hdr *hdr;
+ struct ccb_hdr_0x18 *hdr18;
+ uint8_t *ccbb, *ccbb18;
+ u_int error;
+
+ hdr18 = (struct ccb_hdr_0x18 *)addr;
+ ccb = xpt_alloc_ccb();
+ hdr = &ccb->ccb_h;
+
+ hdr->pinfo = hdr18->pinfo;
+ hdr->xpt_links = hdr18->xpt_links;
+ hdr->sim_links = hdr18->sim_links;
+ hdr->periph_links = hdr18->periph_links;
+ hdr->retry_count = hdr18->retry_count;
+ hdr->cbfcnp = hdr18->cbfcnp;
+ hdr->func_code = hdr18->func_code;
+ hdr->status = hdr18->status;
+ hdr->path = hdr18->path;
+ hdr->path_id = hdr18->path_id;
+ hdr->target_id = hdr18->target_id;
+ hdr->target_lun = hdr18->target_lun;
+ if (hdr18->xflags & CAM_EXTLUN_VALID_0x18)
+ hdr->target_lun = hdr18->ext_lun;
+ hdr->flags = hdr18->flags;
+ hdr->xflags = hdr18->xflags;
+ hdr->periph_priv = hdr18->periph_priv;
+ hdr->sim_priv = hdr18->sim_priv;
+ hdr->timeout = hdr18->timeout;
+ hdr->softtimeout.tv_sec = 0;
+ hdr->softtimeout.tv_usec = 0;
+
+ ccbb = (uint8_t *)&hdr[1];
+ ccbb18 = (uint8_t *)&hdr18[1];
+ if (ccb->ccb_h.func_code == XPT_SET_TRAN_SETTINGS) {
+ struct ccb_trans_settings *cts;
+ struct ccb_trans_settings_0x18 *cts18;
+
+ cts = &ccb->cts;
+ cts18 = (struct ccb_trans_settings_0x18 *)hdr18;
+ cts->type = cts18->type;
+ cts->protocol = cts18->protocol;
+ cts->protocol_version = cts18->protocol_version;
+ cts->transport = cts18->transport;
+ cts->transport_version = cts18->transport_version;
+ bcopy(&cts18->proto_specific, &cts->proto_specific,
+ sizeof(cts18->proto_specific));
+ bcopy(&cts18->xport_specific, &cts->xport_specific,
+ sizeof(cts18->xport_specific));
+ } else {
+ bcopy(ccbb18, ccbb, CAM_0X18_DATA_LEN);
+ }
+
+ error = (cbfnp)(dev, cmd, (caddr_t)ccb, flag, td);
+
+ hdr18->pinfo = hdr->pinfo;
+ hdr18->xpt_links = hdr->xpt_links;
+ hdr18->sim_links = hdr->sim_links;
+ hdr18->periph_links = hdr->periph_links;
+ hdr18->retry_count = hdr->retry_count;
+ hdr18->cbfcnp = hdr->cbfcnp;
+ hdr18->func_code = hdr->func_code;
+ hdr18->status = hdr->status;
+ hdr18->path = hdr->path;
+ hdr18->path_id = hdr->path_id;
+ hdr18->target_id = hdr->target_id;
+ hdr18->target_lun = hdr->target_lun;
+ hdr18->ext_lun = hdr->target_lun;
+ hdr18->flags = hdr->flags;
+ hdr18->xflags = hdr->xflags | CAM_EXTLUN_VALID_0x18;
+ hdr18->periph_priv = hdr->periph_priv;
+ hdr18->sim_priv = hdr->sim_priv;
+ hdr18->timeout = hdr->timeout;
+
+ if (ccb->ccb_h.func_code == XPT_GET_TRAN_SETTINGS) {
+ struct ccb_trans_settings *cts;
+ struct ccb_trans_settings_0x18 *cts18;
+
+ cts = &ccb->cts;
+ cts18 = (struct ccb_trans_settings_0x18 *)hdr18;
+ cts18->type = cts->type;
+ cts18->protocol = cts->protocol;
+ cts18->protocol_version = cts->protocol_version;
+ cts18->transport = cts->transport;
+ cts18->transport_version = cts->transport_version;
+ bcopy(&cts->proto_specific, &cts18->proto_specific,
+ sizeof(cts18->proto_specific));
+ bcopy(&cts->xport_specific, &cts18->xport_specific,
+ sizeof(cts18->xport_specific));
+ } else if (ccb->ccb_h.func_code == XPT_DEV_MATCH) {
+ bcopy(ccbb, ccbb18, CAM_0X18_DATA_LEN);
+ cam_compat_translate_dev_match_0x18(ccb);
+ } else {
+ bcopy(ccbb, ccbb18, CAM_0X18_DATA_LEN);
+ }
+
+ xpt_free_ccb(ccb);
+
+ return (error);
+}
+
+static int
+cam_compat_translate_dev_match_0x18(union ccb *ccb)
+{
+ struct dev_match_result *dm;
+ struct dev_match_result_0x18 *dm18;
+ struct cam_periph_map_info mapinfo;
+ int i;
+#ifndef __rtems__
+ /* Remap the CCB into kernel address space */
+ bzero(&mapinfo, sizeof(mapinfo));
+ cam_periph_mapmem(ccb, &mapinfo, MAXPHYS);
+#endif
+ dm = ccb->cdm.matches;
+ /* Translate in-place: old fields are smaller */
+ dm18 = (struct dev_match_result_0x18 *)(dm);
+
+ for (i = 0; i < ccb->cdm.num_matches; i++) {
+ dm18[i].type = dm[i].type;
+ switch (dm[i].type) {
+ case DEV_MATCH_PERIPH:
+ memcpy(&dm18[i].result.periph_result.periph_name,
+ &dm[i].result.periph_result.periph_name,
+ DEV_IDLEN);
+ dm18[i].result.periph_result.unit_number =
+ dm[i].result.periph_result.unit_number;
+ dm18[i].result.periph_result.path_id =
+ dm[i].result.periph_result.path_id;
+ dm18[i].result.periph_result.target_id =
+ dm[i].result.periph_result.target_id;
+ dm18[i].result.periph_result.target_lun =
+ dm[i].result.periph_result.target_lun;
+ break;
+ case DEV_MATCH_DEVICE:
+ dm18[i].result.device_result.path_id =
+ dm[i].result.device_result.path_id;
+ dm18[i].result.device_result.target_id =
+ dm[i].result.device_result.target_id;
+ dm18[i].result.device_result.target_lun =
+ dm[i].result.device_result.target_lun;
+ dm18[i].result.device_result.protocol =
+ dm[i].result.device_result.protocol;
+ memcpy(&dm18[i].result.device_result.inq_data,
+ &dm[i].result.device_result.inq_data,
+ sizeof(struct scsi_inquiry_data));
+ memcpy(&dm18[i].result.device_result.ident_data,
+ &dm[i].result.device_result.ident_data,
+ sizeof(struct ata_params));
+ dm18[i].result.device_result.flags =
+ dm[i].result.device_result.flags;
+ break;
+ case DEV_MATCH_BUS:
+ memcpy(&dm18[i].result.bus_result,
+ &dm[i].result.bus_result,
+ sizeof(struct bus_match_result));
+ break;
+ }
+ }
+#ifndef __rtems__
+ cam_periph_unmapmem(ccb, &mapinfo);
+#endif
+ return (0);
+}
+
diff --git a/freebsd/sys/cam/cam_compat.h b/freebsd/sys/cam/cam_compat.h
new file mode 100644
index 0000000..a939087
--- /dev/null
+++ b/freebsd/sys/cam/cam_compat.h
@@ -0,0 +1,223 @@
+/*-
+ * CAM ioctl compatibility shims
+ *
+ * Copyright (c) 2013 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_COMPAT_H
+#define _CAM_CAM_COMPAT_H
+
+/* No user-serviceable parts in here. */
+#ifdef _KERNEL
+
+int cam_compat_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td, int(*cbfnp)(struct cdev *, u_long, caddr_t, int,
+ struct thread *));
+
+
+/* Version 0x16 compatibility */
+#define CAM_VERSION_0x16 0x16
+
+/* The size of the union ccb didn't change when going to 0x17 */
+#define CAMIOCOMMAND_0x16 _IOC(IOC_INOUT, CAM_VERSION_0x16, 2, CAM_0X17_LEN)
+#define CAMGETPASSTHRU_0x16 _IOC(IOC_INOUT, CAM_VERSION_0x16, 3, CAM_0X17_LEN)
+
+#define CAM_SCATTER_VALID_0x16 0x00000010
+#define CAM_SG_LIST_PHYS_0x16 0x00040000
+#define CAM_DATA_PHYS_0x16 0x00200000
+
+/* Version 0x17 compatibility */
+#define CAM_VERSION_0x17 0x17
+
+struct ccb_hdr_0x17 {
+ cam_pinfo pinfo; /* Info for priority scheduling */
+ camq_entry xpt_links; /* For chaining in the XPT layer */
+ camq_entry sim_links; /* For chaining in the SIM layer */
+ camq_entry periph_links; /* For chaining in the type driver */
+ u_int32_t retry_count;
+ void (*cbfcnp)(struct cam_periph *, union ccb *);
+ xpt_opcode func_code; /* XPT function code */
+ u_int32_t status; /* Status returned by CAM subsystem */
+ struct cam_path *path; /* Compiled path for this ccb */
+ path_id_t path_id; /* Path ID for the request */
+ target_id_t target_id; /* Target device ID */
+ u_int target_lun; /* Target LUN number */
+ u_int32_t flags; /* ccb_flags */
+ ccb_ppriv_area periph_priv;
+ ccb_spriv_area sim_priv;
+ u_int32_t timeout; /* Hard timeout value in seconds */
+ struct callout_handle timeout_ch;
+};
+
+struct ccb_pathinq_0x17 {
+ struct ccb_hdr_0x17 ccb_h;
+ u_int8_t version_num; /* Version number for the SIM/HBA */
+ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */
+ u_int8_t target_sprt; /* Flags for target mode support */
+ u_int8_t hba_misc; /* Misc HBA features */
+ u_int16_t hba_eng_cnt; /* HBA engine count */
+ /* Vendor Unique capabilities */
+ u_int8_t vuhba_flags[VUHBALEN];
+ u_int32_t max_target; /* Maximum supported Target */
+ u_int32_t max_lun; /* Maximum supported Lun */
+ u_int32_t async_flags; /* Installed Async handlers */
+ path_id_t hpath_id; /* Highest Path ID in the subsystem */
+ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */
+ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */
+ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */
+ char dev_name[DEV_IDLEN];/* Device name for SIM */
+ u_int32_t unit_number; /* Unit number for SIM */
+ u_int32_t bus_id; /* Bus ID for SIM */
+ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */
+ cam_proto protocol;
+ u_int protocol_version;
+ cam_xport transport;
+ u_int transport_version;
+ union {
+ struct ccb_pathinq_settings_spi spi;
+ struct ccb_pathinq_settings_fc fc;
+ struct ccb_pathinq_settings_sas sas;
+ char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
+ } xport_specific;
+ u_int maxio; /* Max supported I/O size, in bytes. */
+ u_int16_t hba_vendor; /* HBA vendor ID */
+ u_int16_t hba_device; /* HBA device ID */
+ u_int16_t hba_subvendor; /* HBA subvendor ID */
+ u_int16_t hba_subdevice; /* HBA subdevice ID */
+};
+
+struct ccb_trans_settings_0x17 {
+ struct ccb_hdr_0x17 ccb_h;
+ cts_type type; /* Current or User settings */
+ cam_proto protocol;
+ u_int protocol_version;
+ cam_xport transport;
+ u_int transport_version;
+ union {
+ u_int valid; /* Which fields to honor */
+ struct ccb_trans_settings_ata ata;
+ struct ccb_trans_settings_scsi scsi;
+ } proto_specific;
+ union {
+ u_int valid; /* Which fields to honor */
+ struct ccb_trans_settings_spi spi;
+ struct ccb_trans_settings_fc fc;
+ struct ccb_trans_settings_sas sas;
+ struct ccb_trans_settings_pata ata;
+ struct ccb_trans_settings_sata sata;
+ } xport_specific;
+};
+
+#define CAM_0X17_DATA_LEN CAM_0X18_DATA_LEN
+#define CAM_0X17_LEN (sizeof(struct ccb_hdr_0x17) + CAM_0X17_DATA_LEN)
+
+#define CAMIOCOMMAND_0x17 _IOC(IOC_INOUT, CAM_VERSION_0x17, 2, CAM_0X17_LEN)
+#define CAMGETPASSTHRU_0x17 _IOC(IOC_INOUT, CAM_VERSION_0x17, 3, CAM_0X17_LEN)
+
+/* Version 0x18 compatibility */
+#define CAM_VERSION_0x18 0x18
+
+struct ccb_hdr_0x18 {
+ cam_pinfo pinfo; /* Info for priority scheduling */
+ camq_entry xpt_links; /* For chaining in the XPT layer */
+ camq_entry sim_links; /* For chaining in the SIM layer */
+ camq_entry periph_links; /* For chaining in the type driver */
+ u_int32_t retry_count;
+ void (*cbfcnp)(struct cam_periph *, union ccb *);
+ xpt_opcode func_code; /* XPT function code */
+ u_int32_t status; /* Status returned by CAM subsystem */
+ struct cam_path *path; /* Compiled path for this ccb */
+ path_id_t path_id; /* Path ID for the request */
+ target_id_t target_id; /* Target device ID */
+ u_int target_lun; /* Target LUN number */
+ u_int64_t ext_lun; /* 64-bit LUN, more or less */
+ u_int32_t flags; /* ccb_flags */
+ u_int32_t xflags; /* extended ccb_flags */
+ ccb_ppriv_area periph_priv;
+ ccb_spriv_area sim_priv;
+ ccb_qos_area qos;
+ u_int32_t timeout; /* Hard timeout value in seconds */
+ struct timeval softtimeout; /* Soft timeout value in sec + usec */
+};
+
+typedef enum {
+ CAM_EXTLUN_VALID_0x18 = 0x00000001,/* 64bit lun field is valid */
+} ccb_xflags_0x18;
+
+struct ccb_trans_settings_0x18 {
+ struct ccb_hdr_0x18 ccb_h;
+ cts_type type; /* Current or User settings */
+ cam_proto protocol;
+ u_int protocol_version;
+ cam_xport transport;
+ u_int transport_version;
+ union {
+ u_int valid; /* Which fields to honor */
+ struct ccb_trans_settings_ata ata;
+ struct ccb_trans_settings_scsi scsi;
+ } proto_specific;
+ union {
+ u_int valid; /* Which fields to honor */
+ struct ccb_trans_settings_spi spi;
+ struct ccb_trans_settings_fc fc;
+ struct ccb_trans_settings_sas sas;
+ struct ccb_trans_settings_pata ata;
+ struct ccb_trans_settings_sata sata;
+ } xport_specific;
+};
+
+struct dev_match_result_0x18 {
+ dev_match_type type;
+ union {
+ struct {
+ char periph_name[DEV_IDLEN];
+ u_int32_t unit_number;
+ path_id_t path_id;
+ target_id_t target_id;
+ u_int target_lun;
+ } periph_result;
+ struct {
+ path_id_t path_id;
+ target_id_t target_id;
+ u_int target_lun;
+ cam_proto protocol;
+ struct scsi_inquiry_data inq_data;
+ struct ata_params ident_data;
+ dev_result_flags flags;
+ } device_result;
+ struct bus_match_result bus_result;
+ } result;
+};
+
+#define CAM_0X18_DATA_LEN (sizeof(union ccb) - 2*sizeof(void *) - sizeof(struct ccb_hdr))
+#define CAM_0X18_LEN (sizeof(struct ccb_hdr_0x18) + CAM_0X18_DATA_LEN)
+
+#define CAMIOCOMMAND_0x18 _IOC(IOC_INOUT, CAM_VERSION_0x18, 2, CAM_0X18_LEN)
+#define CAMGETPASSTHRU_0x18 _IOC(IOC_INOUT, CAM_VERSION_0x18, 3, CAM_0X18_LEN)
+
+#endif
+#endif
diff --git a/freebsd/sys/cam/cam_debug.h b/freebsd/sys/cam/cam_debug.h
index 7b619a2..c13d7c1 100644
--- a/freebsd/sys/cam/cam_debug.h
+++ b/freebsd/sys/cam/cam_debug.h
@@ -44,7 +44,7 @@ typedef enum {
CAM_DEBUG_PROBE = 0x40 /* print out probe actions */
} cam_debug_flags;
-#if defined(_KERNEL) && !defined(__rtems__)
+#if defined(_KERNEL) && defined(__rtems__)
#ifndef CAM_DEBUG_FLAGS
#define CAM_DEBUG_FLAGS CAM_DEBUG_NONE
diff --git a/freebsd/sys/cam/cam_periph.c b/freebsd/sys/cam/cam_periph.c
new file mode 100644
index 0000000..4986639
--- /dev/null
+++ b/freebsd/sys/cam/cam_periph.c
@@ -0,0 +1,2018 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Common functions for CAM "type" (peripheral) drivers.
+ *
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/bio.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/buf.h>
+#include <sys/proc.h>
+#include <sys/devicestat.h>
+#include <sys/bus.h>
+#include <sys/sbuf.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_sim.h>
+
+#include <cam/scsi/scsi_all.h>
+#ifndef __rtems__
+#include <cam/scsi/scsi_message.h>
+#include <cam/scsi/scsi_pass.h>
+#endif /* __rtems__ */
+/* Ioctl definitions extracted from <cam/scsi/scsi_pass.h> (not built for RTEMS) */
+#define CAMIOCOMMAND _IOWR(CAM_VERSION, 2, union ccb)
+#define CAMGETPASSTHRU _IOWR(CAM_VERSION, 3, union ccb)
+
+static u_int camperiphnextunit(struct periph_driver *p_drv,
+ u_int newunit, int wired,
+ path_id_t pathid, target_id_t target,
+ lun_id_t lun);
+static u_int camperiphunit(struct periph_driver *p_drv,
+ path_id_t pathid, target_id_t target,
+ lun_id_t lun);
+static void camperiphdone(struct cam_periph *periph,
+ union ccb *done_ccb);
+static void camperiphfree(struct cam_periph *periph);
+static int camperiphscsistatuserror(union ccb *ccb,
+ union ccb **orig_ccb,
+ cam_flags camflags,
+ u_int32_t sense_flags,
+ int *openings,
+ u_int32_t *relsim_flags,
+ u_int32_t *timeout,
+ u_int32_t *action,
+ const char **action_string);
+static int camperiphscsisenseerror(union ccb *ccb,
+ union ccb **orig_ccb,
+ cam_flags camflags,
+ u_int32_t sense_flags,
+ int *openings,
+ u_int32_t *relsim_flags,
+ u_int32_t *timeout,
+ u_int32_t *action,
+ const char **action_string);
+static void cam_periph_devctl_notify(union ccb *ccb);
+
+static int nperiph_drivers;
+static int initialized = 0;
+struct periph_driver **periph_drivers;
+
+static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
+
+static int periph_selto_delay = 1000;
+TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
+static int periph_noresrc_delay = 500;
+TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
+static int periph_busy_delay = 500;
+TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
+
+
+void
+periphdriver_register(void *data)
+{
+ struct periph_driver *drv = (struct periph_driver *)data;
+ struct periph_driver **newdrivers, **old;
+ int ndrivers;
+
+again:
+ ndrivers = nperiph_drivers + 2;
+ newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
+ M_WAITOK);
+ xpt_lock_buses();
+ if (ndrivers != nperiph_drivers + 2) {
+ /*
+ * Lost race against itself; go around.
+ */
+ xpt_unlock_buses();
+ free(newdrivers, M_CAMPERIPH);
+ goto again;
+ }
+ if (periph_drivers)
+ bcopy(periph_drivers, newdrivers,
+ sizeof(*newdrivers) * nperiph_drivers);
+ newdrivers[nperiph_drivers] = drv;
+ newdrivers[nperiph_drivers + 1] = NULL;
+ old = periph_drivers;
+ periph_drivers = newdrivers;
+ nperiph_drivers++;
+ xpt_unlock_buses();
+ if (old)
+ free(old, M_CAMPERIPH);
+ /* If driver marked as early or it is late now, initialize it. */
+ if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
+ initialized > 1)
+ (*drv->init)();
+}
+
+int
+periphdriver_unregister(void *data)
+{
+ struct periph_driver *drv = (struct periph_driver *)data;
+ int error, n;
+
+ /* If driver marked as early or it is late now, deinitialize it. */
+ if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
+ initialized > 1) {
+ if (drv->deinit == NULL) {
+ printf("CAM periph driver '%s' doesn't have deinit.\n",
+ drv->driver_name);
+ return (EOPNOTSUPP);
+ }
+ error = drv->deinit();
+ if (error != 0)
+ return (error);
+ }
+
+ xpt_lock_buses();
+ for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
+ ;
+ KASSERT(n < nperiph_drivers,
+ ("Periph driver '%s' was not registered", drv->driver_name));
+ for (; n + 1 < nperiph_drivers; n++)
+ periph_drivers[n] = periph_drivers[n + 1];
+ periph_drivers[n + 1] = NULL;
+ nperiph_drivers--;
+ xpt_unlock_buses();
+ return (0);
+}
+
+void
+periphdriver_init(int level)
+{
+ int i, early;
+
+ initialized = max(initialized, level);
+ for (i = 0; periph_drivers[i] != NULL; i++) {
+ early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
+ if (early == initialized)
+ (*periph_drivers[i]->init)();
+ }
+}
+
+cam_status
+cam_periph_alloc(periph_ctor_t *periph_ctor,
+ periph_oninv_t *periph_oninvalidate,
+ periph_dtor_t *periph_dtor, periph_start_t *periph_start,
+ char *name, cam_periph_type type, struct cam_path *path,
+ ac_callback_t *ac_callback, ac_code code, void *arg)
+{
+ struct periph_driver **p_drv;
+ struct cam_sim *sim;
+ struct cam_periph *periph;
+ struct cam_periph *cur_periph;
+ path_id_t path_id;
+ target_id_t target_id;
+ lun_id_t lun_id;
+ cam_status status;
+ u_int init_level;
+
+ init_level = 0;
+ /*
+ * Handle Hot-Plug scenarios. If there is already a peripheral
+ * of our type assigned to this path, we are likely waiting for
+ * final close on an old, invalidated, peripheral. If this is
+ * the case, queue up a deferred call to the peripheral's async
+ * handler. If it looks like a mistaken re-allocation, complain.
+ */
+ if ((periph = cam_periph_find(path, name)) != NULL) {
+
+ if ((periph->flags & CAM_PERIPH_INVALID) != 0
+ && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
+ periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
+ periph->deferred_callback = ac_callback;
+ periph->deferred_ac = code;
+ return (CAM_REQ_INPROG);
+ } else {
+ printf("cam_periph_alloc: attempt to re-allocate "
+ "valid device %s%d rejected flags %#x "
+ "refcount %d\n", periph->periph_name,
+ periph->unit_number, periph->flags,
+ periph->refcount);
+ }
+ return (CAM_REQ_INVALID);
+ }
+
+ periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
+ M_NOWAIT|M_ZERO);
+
+ if (periph == NULL)
+ return (CAM_RESRC_UNAVAIL);
+
+ init_level++;
+
+
+ sim = xpt_path_sim(path);
+ path_id = xpt_path_path_id(path);
+ target_id = xpt_path_target_id(path);
+ lun_id = xpt_path_lun_id(path);
+ periph->periph_start = periph_start;
+ periph->periph_dtor = periph_dtor;
+ periph->periph_oninval = periph_oninvalidate;
+ periph->type = type;
+ periph->periph_name = name;
+ periph->scheduled_priority = CAM_PRIORITY_NONE;
+ periph->immediate_priority = CAM_PRIORITY_NONE;
+ periph->refcount = 1; /* Dropped by invalidation. */
+ periph->sim = sim;
+ SLIST_INIT(&periph->ccb_list);
+ status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
+ if (status != CAM_REQ_CMP)
+ goto failure;
+ periph->path = path;
+
+ xpt_lock_buses();
+ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
+ if (strcmp((*p_drv)->driver_name, name) == 0)
+ break;
+ }
+ if (*p_drv == NULL) {
+ printf("cam_periph_alloc: invalid periph name '%s'\n", name);
+ xpt_unlock_buses();
+ xpt_free_path(periph->path);
+ free(periph, M_CAMPERIPH);
+ return (CAM_REQ_INVALID);
+ }
+ periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
+ cur_periph = TAILQ_FIRST(&(*p_drv)->units);
+ while (cur_periph != NULL
+ && cur_periph->unit_number < periph->unit_number)
+ cur_periph = TAILQ_NEXT(cur_periph, unit_links);
+ if (cur_periph != NULL) {
+ KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
+ TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
+ } else {
+ TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
+ (*p_drv)->generation++;
+ }
+ xpt_unlock_buses();
+
+ init_level++;
+
+ status = xpt_add_periph(periph);
+ if (status != CAM_REQ_CMP)
+ goto failure;
+
+ init_level++;
+ CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
+
+ status = periph_ctor(periph, arg);
+
+ if (status == CAM_REQ_CMP)
+ init_level++;
+
+failure:
+ switch (init_level) {
+ case 4:
+ /* Initialized successfully */
+ break;
+ case 3:
+ CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
+ xpt_remove_periph(periph);
+ /* FALLTHROUGH */
+ case 2:
+ xpt_lock_buses();
+ TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
+ xpt_unlock_buses();
+ xpt_free_path(periph->path);
+ /* FALLTHROUGH */
+ case 1:
+ free(periph, M_CAMPERIPH);
+ /* FALLTHROUGH */
+ case 0:
+ /* No cleanup to perform. */
+ break;
+ default:
+ panic("%s: Unknown init level", __func__);
+ }
+ return(status);
+}
+
+/*
+ * Find a peripheral structure with the specified path, target, lun,
+ * and (optionally) type. If the name is NULL, this function will return
+ * the first peripheral driver that matches the specified path.
+ */
+struct cam_periph *
+cam_periph_find(struct cam_path *path, char *name)
+{
+ struct periph_driver **p_drv;
+ struct cam_periph *periph;
+
+ xpt_lock_buses();
+ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
+
+ if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
+ continue;
+
+ TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
+ if (xpt_path_comp(periph->path, path) == 0) {
+ xpt_unlock_buses();
+ cam_periph_assert(periph, MA_OWNED);
+ return(periph);
+ }
+ }
+ if (name != NULL) {
+ xpt_unlock_buses();
+ return(NULL);
+ }
+ }
+ xpt_unlock_buses();
+ return(NULL);
+}
+
+/*
+ * Find peripheral driver instances attached to the specified path.
+ */
+int
+cam_periph_list(struct cam_path *path, struct sbuf *sb)
+{
+ struct sbuf local_sb;
+ struct periph_driver **p_drv;
+ struct cam_periph *periph;
+ int count;
+ int sbuf_alloc_len;
+
+ sbuf_alloc_len = 16;
+retry:
+ sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
+ count = 0;
+ xpt_lock_buses();
+ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
+
+ TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
+ if (xpt_path_comp(periph->path, path) != 0)
+ continue;
+
+ if (sbuf_len(&local_sb) != 0)
+ sbuf_cat(&local_sb, ",");
+
+ sbuf_printf(&local_sb, "%s%d", periph->periph_name,
+ periph->unit_number);
+
+ if (sbuf_error(&local_sb) == ENOMEM) {
+ sbuf_alloc_len *= 2;
+ xpt_unlock_buses();
+ sbuf_delete(&local_sb);
+ goto retry;
+ }
+ count++;
+ }
+ }
+ xpt_unlock_buses();
+ sbuf_finish(&local_sb);
+ sbuf_cpy(sb, sbuf_data(&local_sb));
+ sbuf_delete(&local_sb);
+ return (count);
+}
+
+cam_status
+cam_periph_acquire(struct cam_periph *periph)
+{
+ cam_status status;
+
+ status = CAM_REQ_CMP_ERR;
+ if (periph == NULL)
+ return (status);
+
+ xpt_lock_buses();
+ if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
+ periph->refcount++;
+ status = CAM_REQ_CMP;
+ }
+ xpt_unlock_buses();
+
+ return (status);
+}
+
+void
+cam_periph_doacquire(struct cam_periph *periph)
+{
+
+ xpt_lock_buses();
+ KASSERT(periph->refcount >= 1,
+ ("cam_periph_doacquire() with refcount == %d", periph->refcount));
+ periph->refcount++;
+ xpt_unlock_buses();
+}
+
+void
+cam_periph_release_locked_buses(struct cam_periph *periph)
+{
+
+ cam_periph_assert(periph, MA_OWNED);
+ KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
+ if (--periph->refcount == 0)
+ camperiphfree(periph);
+}
+
+void
+cam_periph_release_locked(struct cam_periph *periph)
+{
+
+ if (periph == NULL)
+ return;
+
+ xpt_lock_buses();
+ cam_periph_release_locked_buses(periph);
+ xpt_unlock_buses();
+}
+
+void
+cam_periph_release(struct cam_periph *periph)
+{
+ struct mtx *mtx;
+
+ if (periph == NULL)
+ return;
+
+ cam_periph_assert(periph, MA_NOTOWNED);
+ mtx = cam_periph_mtx(periph);
+ mtx_lock(mtx);
+ cam_periph_release_locked(periph);
+ mtx_unlock(mtx);
+}
+
+int
+cam_periph_hold(struct cam_periph *periph, int priority)
+{
+ int error;
+
+ /*
+ * Increment the reference count on the peripheral
+ * while we wait for our lock attempt to succeed
+ * to ensure the peripheral doesn't disappear out
+	 * from under us while we sleep.
+ */
+
+ if (cam_periph_acquire(periph) != CAM_REQ_CMP)
+ return (ENXIO);
+
+ cam_periph_assert(periph, MA_OWNED);
+ while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
+ periph->flags |= CAM_PERIPH_LOCK_WANTED;
+ if ((error = cam_periph_sleep(periph, periph, priority,
+ "caplck", 0)) != 0) {
+ cam_periph_release_locked(periph);
+ return (error);
+ }
+ if (periph->flags & CAM_PERIPH_INVALID) {
+ cam_periph_release_locked(periph);
+ return (ENXIO);
+ }
+ }
+
+ periph->flags |= CAM_PERIPH_LOCKED;
+ return (0);
+}
+
+void
+cam_periph_unhold(struct cam_periph *periph)
+{
+
+ cam_periph_assert(periph, MA_OWNED);
+
+ periph->flags &= ~CAM_PERIPH_LOCKED;
+ if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
+ periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
+ wakeup(periph);
+ }
+
+ cam_periph_release_locked(periph);
+}
+
+/*
+ * Look for the next unit number that is not currently in use for this
+ * peripheral type starting at "newunit". Also exclude unit numbers that
+ * are reserved for future "hardwiring" unless we already know that this
+ * is a potential wired device. Only assume that the device is "wired" the
+ * first time through the loop since after that we'll be looking at unit
+ * numbers that did not match a wiring entry.
+ */
+static u_int
+camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
+ path_id_t pathid, target_id_t target, lun_id_t lun)
+{
+ struct cam_periph *periph;
+ char *periph_name;
+ int i, val, dunit, r;
+ const char *dname, *strval;
+
+ periph_name = p_drv->driver_name;
+ for (;;newunit++) {
+
+ for (periph = TAILQ_FIRST(&p_drv->units);
+ periph != NULL && periph->unit_number != newunit;
+ periph = TAILQ_NEXT(periph, unit_links))
+ ;
+
+ if (periph != NULL && periph->unit_number == newunit) {
+ if (wired != 0) {
+ xpt_print(periph->path, "Duplicate Wired "
+ "Device entry!\n");
+ xpt_print(periph->path, "Second device (%s "
+ "device at scbus%d target %d lun %d) will "
+ "not be wired\n", periph_name, pathid,
+ target, lun);
+ wired = 0;
+ }
+ continue;
+ }
+ if (wired)
+ break;
+
+ /*
+ * Don't match entries like "da 4" as a wired down
+ * device, but do match entries like "da 4 target 5"
+ * or even "da 4 scbus 1".
+ */
+ i = 0;
+ dname = periph_name;
+#ifndef __rtems__
+ for (;;) {
+ r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
+ if (r != 0)
+ break;
+ /* if no "target" and no specific scbus, skip */
+ if (resource_int_value(dname, dunit, "target", &val) &&
+ (resource_string_value(dname, dunit, "at",&strval)||
+ strcmp(strval, "scbus") == 0))
+ continue;
+ if (newunit == dunit)
+ break;
+ }
+#else
+ r = 1;
+#endif
+ if (r != 0)
+ break;
+ }
+ return (newunit);
+}
+
+static u_int
+camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
+ target_id_t target, lun_id_t lun)
+{
+ u_int unit;
+ int wired, i, val, dunit;
+ const char *dname, *strval;
+ char pathbuf[32], *periph_name;
+
+ periph_name = p_drv->driver_name;
+ snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
+ unit = 0;
+ i = 0;
+ dname = periph_name;
+#ifndef __rtems__
+ for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
+ wired = 0) {
+ if (resource_string_value(dname, dunit, "at", &strval) == 0) {
+ if (strcmp(strval, pathbuf) != 0)
+ continue;
+ wired++;
+ }
+ if (resource_int_value(dname, dunit, "target", &val) == 0) {
+ if (val != target)
+ continue;
+ wired++;
+ }
+ if (resource_int_value(dname, dunit, "lun", &val) == 0) {
+ if (val != lun)
+ continue;
+ wired++;
+ }
+ if (wired != 0) {
+ unit = dunit;
+ break;
+ }
+ }
+#endif
+ /*
+ * Either start from 0 looking for the next unit or from
+ * the unit number given in the resource config. This way,
+ * if we have wildcard matches, we don't return the same
+ * unit number twice.
+ */
+ unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
+
+ return (unit);
+}
+
+void
+cam_periph_invalidate(struct cam_periph *periph)
+{
+
+ cam_periph_assert(periph, MA_OWNED);
+ /*
+ * We only call this routine the first time a peripheral is
+ * invalidated.
+ */
+ if ((periph->flags & CAM_PERIPH_INVALID) != 0)
+ return;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
+ if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
+ xpt_denounce_periph(periph);
+ periph->flags |= CAM_PERIPH_INVALID;
+ periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
+ if (periph->periph_oninval != NULL)
+ periph->periph_oninval(periph);
+ cam_periph_release_locked(periph);
+}
+
+static void
+camperiphfree(struct cam_periph *periph)
+{
+ struct periph_driver **p_drv;
+
+ cam_periph_assert(periph, MA_OWNED);
+ KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
+ periph->periph_name, periph->unit_number));
+ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
+ if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
+ break;
+ }
+ if (*p_drv == NULL) {
+ printf("camperiphfree: attempt to free non-existant periph\n");
+ return;
+ }
+
+ /*
+ * We need to set this flag before dropping the topology lock, to
+ * let anyone who is traversing the list that this peripheral is
+ * about to be freed, and there will be no more reference count
+ * checks.
+ */
+ periph->flags |= CAM_PERIPH_FREE;
+
+ /*
+ * The peripheral destructor semantics dictate calling with only the
+ * SIM mutex held. Since it might sleep, it should not be called
+ * with the topology lock held.
+ */
+ xpt_unlock_buses();
+
+ /*
+ * We need to call the peripheral destructor prior to removing the
+ * peripheral from the list. Otherwise, we risk running into a
+ * scenario where the peripheral unit number may get reused
+ * (because it has been removed from the list), but some resources
+ * used by the peripheral are still hanging around. In particular,
+ * the devfs nodes used by some peripherals like the pass(4) driver
+ * aren't fully cleaned up until the destructor is run. If the
+ * unit number is reused before the devfs instance is fully gone,
+ * devfs will panic.
+ */
+ if (periph->periph_dtor != NULL)
+ periph->periph_dtor(periph);
+
+ /*
+ * The peripheral list is protected by the topology lock.
+ */
+ xpt_lock_buses();
+
+ TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
+ (*p_drv)->generation++;
+
+ xpt_remove_periph(periph);
+
+ xpt_unlock_buses();
+ if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
+ xpt_print(periph->path, "Periph destroyed\n");
+ else
+ CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
+
+ if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
+ union ccb ccb;
+ void *arg;
+
+ switch (periph->deferred_ac) {
+ case AC_FOUND_DEVICE:
+ ccb.ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
+ xpt_action(&ccb);
+ arg = &ccb;
+ break;
+ case AC_PATH_REGISTERED:
+ ccb.ccb_h.func_code = XPT_PATH_INQ;
+ xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
+ xpt_action(&ccb);
+ arg = &ccb;
+ break;
+ default:
+ arg = NULL;
+ break;
+ }
+ periph->deferred_callback(NULL, periph->deferred_ac,
+ periph->path, arg);
+ }
+ xpt_free_path(periph->path);
+ free(periph, M_CAMPERIPH);
+ xpt_lock_buses();
+}
+
+/*
+ * Map user virtual pointers into kernel virtual address space, so we can
+ * access the memory. This is now a generic function that centralizes most
+ * of the sanity checks on the data flags, if any.
+ * This also only works for up to MAXPHYS memory. Since we use
+ * buffers to map stuff in and out, we're limited to the buffer size.
+ */
+#ifndef __rtems__
+int
+cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
+ u_int maxmap)
+{
+ int numbufs, i, j;
+ int flags[CAM_PERIPH_MAXMAPS];
+ u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
+ u_int32_t lengths[CAM_PERIPH_MAXMAPS];
+ u_int32_t dirs[CAM_PERIPH_MAXMAPS];
+
+ if (maxmap == 0)
+ maxmap = DFLTPHYS; /* traditional default */
+ else if (maxmap > MAXPHYS)
+ maxmap = MAXPHYS; /* for safety */
+ switch(ccb->ccb_h.func_code) {
+ case XPT_DEV_MATCH:
+ if (ccb->cdm.match_buf_len == 0) {
+ printf("cam_periph_mapmem: invalid match buffer "
+ "length 0\n");
+ return(EINVAL);
+ }
+ if (ccb->cdm.pattern_buf_len > 0) {
+ data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
+ lengths[0] = ccb->cdm.pattern_buf_len;
+ dirs[0] = CAM_DIR_OUT;
+ data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
+ lengths[1] = ccb->cdm.match_buf_len;
+ dirs[1] = CAM_DIR_IN;
+ numbufs = 2;
+ } else {
+ data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
+ lengths[0] = ccb->cdm.match_buf_len;
+ dirs[0] = CAM_DIR_IN;
+ numbufs = 1;
+ }
+ /*
+ * This request will not go to the hardware, no reason
+ * to be so strict. vmapbuf() is able to map up to MAXPHYS.
+ */
+ maxmap = MAXPHYS;
+ break;
+ case XPT_SCSI_IO:
+ case XPT_CONT_TARGET_IO:
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
+ return(0);
+ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
+ return (EINVAL);
+ data_ptrs[0] = &ccb->csio.data_ptr;
+ lengths[0] = ccb->csio.dxfer_len;
+ dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
+ numbufs = 1;
+ break;
+ case XPT_ATA_IO:
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
+ return(0);
+ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
+ return (EINVAL);
+ data_ptrs[0] = &ccb->ataio.data_ptr;
+ lengths[0] = ccb->ataio.dxfer_len;
+ dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
+ numbufs = 1;
+ break;
+ case XPT_MMC_IO:
+ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
+ return(0);
+ /* Two mappings: one for cmd->data and one for cmd->data->data */
+ data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
+ lengths[0] = sizeof(struct mmc_data *);
+ dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
+ data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
+ lengths[1] = ccb->mmcio.cmd.data->len;
+ dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
+ numbufs = 2;
+ break;
+ case XPT_SMP_IO:
+ data_ptrs[0] = &ccb->smpio.smp_request;
+ lengths[0] = ccb->smpio.smp_request_len;
+ dirs[0] = CAM_DIR_OUT;
+ data_ptrs[1] = &ccb->smpio.smp_response;
+ lengths[1] = ccb->smpio.smp_response_len;
+ dirs[1] = CAM_DIR_IN;
+ numbufs = 2;
+ break;
+ case XPT_DEV_ADVINFO:
+ if (ccb->cdai.bufsiz == 0)
+ return (0);
+
+ data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
+ lengths[0] = ccb->cdai.bufsiz;
+ dirs[0] = CAM_DIR_IN;
+ numbufs = 1;
+
+ /*
+ * This request will not go to the hardware, no reason
+ * to be so strict. vmapbuf() is able to map up to MAXPHYS.
+ */
+ maxmap = MAXPHYS;
+ break;
+ default:
+ return(EINVAL);
+ break; /* NOTREACHED */
+ }
+
+ /*
+ * Check the transfer length and permissions first, so we don't
+ * have to unmap any previously mapped buffers.
+ */
+ for (i = 0; i < numbufs; i++) {
+
+ flags[i] = 0;
+
+ /*
+ * The userland data pointer passed in may not be page
+ * aligned. vmapbuf() truncates the address to a page
+ * boundary, so if the address isn't page aligned, we'll
+ * need enough space for the given transfer length, plus
+ * whatever extra space is necessary to make it to the page
+ * boundary.
+ */
+ if ((lengths[i] +
+ (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
+ printf("cam_periph_mapmem: attempt to map %lu bytes, "
+ "which is greater than %lu\n",
+ (long)(lengths[i] +
+ (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
+ (u_long)maxmap);
+ return(E2BIG);
+ }
+
+ if (dirs[i] & CAM_DIR_OUT) {
+ flags[i] = BIO_WRITE;
+ }
+
+ if (dirs[i] & CAM_DIR_IN) {
+ flags[i] = BIO_READ;
+ }
+
+ }
+
+ /*
+ * This keeps the kernel stack of current thread from getting
+ * swapped. In low-memory situations where the kernel stack might
+ * otherwise get swapped out, this holds it and allows the thread
+ * to make progress and release the kernel mapped pages sooner.
+ *
+ * XXX KDM should I use P_NOSWAP instead?
+ */
+ PHOLD(curproc);
+
+ for (i = 0; i < numbufs; i++) {
+ /*
+ * Get the buffer.
+ */
+ mapinfo->bp[i] = getpbuf(NULL);
+
+ /* put our pointer in the data slot */
+ mapinfo->bp[i]->b_data = *data_ptrs[i];
+
+ /* save the user's data address */
+ mapinfo->bp[i]->b_caller1 = *data_ptrs[i];
+
+ /* set the transfer length, we know it's < MAXPHYS */
+ mapinfo->bp[i]->b_bufsize = lengths[i];
+
+ /* set the direction */
+ mapinfo->bp[i]->b_iocmd = flags[i];
+
+ /*
+ * Map the buffer into kernel memory.
+ *
+ * Note that useracc() alone is not a sufficient test.
+ * vmapbuf() can still fail due to a smaller file mapped
+ * into a larger area of VM, or if userland races against
+ * vmapbuf() after the useracc() check.
+ */
+ if (vmapbuf(mapinfo->bp[i], 1) < 0) {
+ for (j = 0; j < i; ++j) {
+ *data_ptrs[j] = mapinfo->bp[j]->b_caller1;
+ vunmapbuf(mapinfo->bp[j]);
+ relpbuf(mapinfo->bp[j], NULL);
+ }
+ relpbuf(mapinfo->bp[i], NULL);
+ PRELE(curproc);
+ return(EACCES);
+ }
+
+ /* set our pointer to the new mapped area */
+ *data_ptrs[i] = mapinfo->bp[i]->b_data;
+
+ mapinfo->num_bufs_used++;
+ }
+
+ /*
+ * Now that we've gotten this far, change ownership to the kernel
+ * of the buffers so that we don't run afoul of returning to user
+ * space with locks (on the buffer) held.
+ */
+ for (i = 0; i < numbufs; i++) {
+ BUF_KERNPROC(mapinfo->bp[i]);
+ }
+
+
+ return(0);
+}
+
+/*
+ * Unmap memory segments mapped into kernel virtual address space by
+ * cam_periph_mapmem().
+ *
+ * Restores the user's original data pointers (saved by
+ * cam_periph_mapmem() in b_caller1), unmaps and releases the pbufs,
+ * and drops the PHOLD taken on the mapping process.  Safe to call
+ * when nothing was mapped.
+ */
+void
+cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
+{
+	int numbufs, i;
+	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
+
+	if (mapinfo->num_bufs_used <= 0) {
+		/* nothing to free and the process wasn't held. */
+		return;
+	}
+
+	/* Recover the data-pointer slots that cam_periph_mapmem() rewrote. */
+	switch (ccb->ccb_h.func_code) {
+	case XPT_DEV_MATCH:
+		numbufs = min(mapinfo->num_bufs_used, 2);
+
+		if (numbufs == 1) {
+			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
+		} else {
+			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
+			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
+		}
+		break;
+	case XPT_SCSI_IO:
+	case XPT_CONT_TARGET_IO:
+		data_ptrs[0] = &ccb->csio.data_ptr;
+		numbufs = min(mapinfo->num_bufs_used, 1);
+		break;
+	case XPT_ATA_IO:
+		data_ptrs[0] = &ccb->ataio.data_ptr;
+		numbufs = min(mapinfo->num_bufs_used, 1);
+		break;
+	case XPT_MMC_IO:
+		/*
+		 * Mirror the two mappings made by cam_periph_mapmem()
+		 * (cmd.data and cmd.data->data).  Without this case MMC
+		 * CCBs fell into the default branch and leaked both pbuf
+		 * mappings without restoring the user's pointers.
+		 */
+		numbufs = min(mapinfo->num_bufs_used, 2);
+		data_ptrs[0] = (u_int8_t **)&ccb->mmcio.cmd.data;
+		data_ptrs[1] = (u_int8_t **)&ccb->mmcio.cmd.data->data;
+		break;
+	case XPT_SMP_IO:
+		numbufs = min(mapinfo->num_bufs_used, 2);
+		data_ptrs[0] = &ccb->smpio.smp_request;
+		data_ptrs[1] = &ccb->smpio.smp_response;
+		break;
+	case XPT_DEV_ADVINFO:
+		numbufs = min(mapinfo->num_bufs_used, 1);
+		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
+		break;
+	default:
+		/* allow ourselves to be swapped once again */
+		PRELE(curproc);
+		return;
+		break; /* NOTREACHED */
+	}
+
+	for (i = 0; i < numbufs; i++) {
+		/* Set the user's pointer back to the original value */
+		*data_ptrs[i] = mapinfo->bp[i]->b_caller1;
+
+		/* unmap the buffer */
+		vunmapbuf(mapinfo->bp[i]);
+
+		/* release the buffer */
+		relpbuf(mapinfo->bp[i], NULL);
+	}
+
+	/* allow ourselves to be swapped once again */
+	PRELE(curproc);
+}
+
+#endif
+/*
+ * Common ioctl handler for CAM peripheral drivers.
+ *
+ * Implements only CAMGETPASSTHRU here: walk the XPT_GDEVLIST device
+ * list on this periph's path until a peripheral named "pass" is found
+ * and copy the resulting CCB back to the caller at 'addr'.  The
+ * error_routine argument is part of the API but is not used by this
+ * command.  Returns 0 on success or ENOTTY for unknown commands.
+ */
+int
+cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
+		 int (*error_routine)(union ccb *ccb,
+				      cam_flags camflags,
+				      u_int32_t sense_flags))
+{
+	union ccb *ccb;
+	int error;
+	int found;
+
+	error = found = 0;
+
+	switch(cmd){
+	case CAMGETPASSTHRU:
+		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
+		xpt_setup_ccb(&ccb->ccb_h,
+			      ccb->ccb_h.path,
+			      CAM_PRIORITY_NORMAL);
+		ccb->ccb_h.func_code = XPT_GDEVLIST;
+
+		/*
+		 * Basically, the point of this is that we go through
+		 * getting the list of devices, until we find a passthrough
+		 * device.  In the current version of the CAM code, the
+		 * only way to determine what type of device we're dealing
+		 * with is by its name.
+		 */
+		while (found == 0) {
+			ccb->cgdl.index = 0;
+			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
+			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
+
+				/* we want the next device in the list */
+				xpt_action(ccb);
+				if (strncmp(ccb->cgdl.periph_name,
+				    "pass", 4) == 0){
+					found = 1;
+					break;
+				}
+			}
+			/* No pass device on this path: report an empty name. */
+			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
+			    (found == 0)) {
+				ccb->cgdl.periph_name[0] = '\0';
+				ccb->cgdl.unit_number = 0;
+				break;
+			}
+		}
+
+		/* copy the result back out */
+		bcopy(ccb, addr, sizeof(union ccb));
+
+		/* and release the ccb */
+		xpt_release_ccb(ccb);
+
+		break;
+	default:
+		error = ENOTTY;
+		break;
+	}
+	return(error);
+}
+
+/*
+ * Completion hook installed by cam_periph_done() once a CCB has
+ * completed; if the XPT invokes it again the CCB completed twice,
+ * which is fatal.
+ */
+static void
+cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
+{
+
+	panic("%s: already done with ccb %p", __func__, done_ccb);
+}
+
+/*
+ * Completion callback used by cam_periph_runccb(): arm the
+ * double-completion guard (cam_periph_done_panic) and wake the thread
+ * sleeping in cam_periph_ccbwait() on &ccb_h.cbfcnp.
+ */
+static void
+cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
+{
+
+	/* Caller will release the CCB */
+	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
+	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
+	wakeup(&done_ccb->ccb_h.cbfcnp);
+}
+
+/*
+ * Sleep until cam_periph_done() marks the CCB complete.  Only queued
+ * function codes (XPT_FC_QUEUED) complete asynchronously; everything
+ * else returned from xpt_action() already done.  Completion is
+ * detected by cbfcnp having been swapped to cam_periph_done_panic.
+ */
+static void
+cam_periph_ccbwait(union ccb *ccb)
+{
+
+	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
+		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
+			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
+			    PRIBIO, "cbwait", 0);
+	}
+	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
+	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
+	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
+	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
+	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
+}
+
+/*
+ * Issue a CCB synchronously: dispatch via xpt_action(), wait for
+ * completion, and run the caller's error routine until it stops
+ * returning ERESTART.  Any device-queue freeze taken on our behalf is
+ * dropped before returning.  When a devstat structure is supplied
+ * (non-RTEMS builds only), SCSI and ATA transactions are accounted
+ * against it.  The path mutex must be held and the CCB must not be
+ * marked CAM_UNLOCKED.  Returns 0 or an errno from the error routine.
+ */
+int
+cam_periph_runccb(union ccb *ccb,
+		  int (*error_routine)(union ccb *ccb,
+				       cam_flags camflags,
+				       u_int32_t sense_flags),
+		  cam_flags camflags, u_int32_t sense_flags,
+		  struct devstat *ds)
+{
+	struct bintime *starttime;
+	struct bintime ltime;
+	int error;
+
+	starttime = NULL;
+	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
+	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
+	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
+	     ccb->ccb_h.func_code, ccb->ccb_h.flags));
+#ifndef __rtems__
+	/*
+	 * If the user has supplied a stats structure, and if we understand
+	 * this particular type of ccb, record the transaction start.
+	 */
+	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
+	    ccb->ccb_h.func_code == XPT_ATA_IO)) {
+		starttime = &ltime;
+		binuptime(starttime);
+		devstat_start_transaction(ds, starttime);
+	}
+#endif
+	ccb->ccb_h.cbfcnp = cam_periph_done;
+	xpt_action(ccb);
+
+	do {
+		cam_periph_ccbwait(ccb);
+		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
+			error = 0;
+		else if (error_routine != NULL) {
+			/*
+			 * cbfcnp was swapped by cam_periph_done(); reset
+			 * it before the error routine, which may requeue
+			 * the CCB and complete it again.
+			 */
+			ccb->ccb_h.cbfcnp = cam_periph_done;
+			error = (*error_routine)(ccb, camflags, sense_flags);
+		} else
+			error = 0;
+
+	} while (error == ERESTART);
+
+	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
+		cam_release_devq(ccb->ccb_h.path,
+				 /* relsim_flags */0,
+				 /* openings */0,
+				 /* timeout */0,
+				 /* getcount_only */ FALSE);
+		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
+	}
+#ifndef __rtems__
+	if (ds != NULL) {
+		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
+			devstat_end_transaction(ds,
+					ccb->csio.dxfer_len - ccb->csio.resid,
+					ccb->csio.tag_action & 0x3,
+					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
+					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
+					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
+					DEVSTAT_WRITE :
+					DEVSTAT_READ, NULL, starttime);
+		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
+			devstat_end_transaction(ds,
+					ccb->ataio.dxfer_len - ccb->ataio.resid,
+					0, /* Not used in ATA */
+					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
+					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
+					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
+					DEVSTAT_WRITE :
+					DEVSTAT_READ, NULL, starttime);
+		}
+	}
+#endif
+	return(error);
+}
+
+/*
+ * Freeze the device queue for the given path by issuing an XPT_NOOP
+ * with CAM_DEV_QFREEZE set; the freeze is undone later with
+ * cam_release_devq().
+ */
+void
+cam_freeze_devq(struct cam_path *path)
+{
+	struct ccb_hdr ccb_h;
+
+	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
+	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
+	ccb_h.func_code = XPT_NOOP;
+	ccb_h.flags = CAM_DEV_QFREEZE;
+	xpt_action((union ccb *)&ccb_h);
+}
+
+/*
+ * Release (or, with getcount_only, merely query) a device-queue
+ * freeze via an XPT_REL_SIMQ request.  relsim_flags/openings/arg
+ * parameterize the release (e.g. RELSIM_RELEASE_AFTER_TIMEOUT treats
+ * arg as the release timeout).  Returns the remaining freeze count.
+ */
+u_int32_t
+cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
+		 u_int32_t openings, u_int32_t arg,
+		 int getcount_only)
+{
+	struct ccb_relsim crs;
+
+	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
+	    relsim_flags, openings, arg, getcount_only));
+	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
+	crs.ccb_h.func_code = XPT_REL_SIMQ;
+	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
+	crs.release_flags = relsim_flags;
+	crs.openings = openings;
+	crs.release_timeout = arg;
+	xpt_action((union ccb *)&crs);
+	return (crs.qfrozen_cnt);
+}
+
+/* ppriv_ptr0 of a recovery CCB holds the caller's original CCB. */
+#define saved_ccb_ptr ppriv_ptr0
+/*
+ * Completion handler for the recovery commands issued by
+ * camperiphscsisenseerror() (START STOP UNIT / TEST UNIT READY).
+ * Retries the recovery command as its own sense data dictates, then
+ * restores the original CCB stashed in saved_ccb_ptr and re-issues it
+ * so that final error processing happens in the CCB owner's context.
+ */
+static void
+camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
+{
+	union ccb *saved_ccb;
+	cam_status status;
+	struct scsi_start_stop_unit *scsi_cmd;
+	int error_code, sense_key, asc, ascq;
+
+	scsi_cmd = (struct scsi_start_stop_unit *)
+	    &done_ccb->csio.cdb_io.cdb_bytes;
+	status = done_ccb->ccb_h.status;
+
+	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		if (scsi_extract_sense_ccb(done_ccb,
+		    &error_code, &sense_key, &asc, &ascq)) {
+			/*
+			 * If the error is "invalid field in CDB",
+			 * and the load/eject flag is set, turn the
+			 * flag off and try again.  This is just in
+			 * case the drive in question barfs on the
+			 * load eject flag.  The CAM code should set
+			 * the load/eject flag by default for
+			 * removable media.
+			 */
+			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
+			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
+			    (asc == 0x24) && (ascq == 0x00)) {
+				scsi_cmd->how &= ~SSS_LOEJ;
+				if (status & CAM_DEV_QFRZN) {
+					cam_release_devq(done_ccb->ccb_h.path,
+							 0, 0, 0, 0);
+					done_ccb->ccb_h.status &=
+					    ~CAM_DEV_QFRZN;
+				}
+				/* Reissue the recovery command itself. */
+				xpt_action(done_ccb);
+				goto out;
+			}
+		}
+		if (cam_periph_error(done_ccb,
+		    0, SF_RETRY_UA | SF_NO_PRINT) == ERESTART)
+			goto out;
+		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
+			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
+			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
+		}
+	} else {
+		/*
+		 * If we have successfully taken a device from the not
+		 * ready to ready state, re-scan the device and re-get
+		 * the inquiry information.  Many devices (mostly disks)
+		 * don't properly report their inquiry information unless
+		 * they are spun up.
+		 */
+		if (scsi_cmd->opcode == START_STOP_UNIT)
+			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
+	}
+
+	/*
+	 * Perform the final retry with the original CCB so that final
+	 * error processing is performed by the owner of the CCB.
+	 */
+	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
+	bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
+	xpt_free_ccb(saved_ccb);
+	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
+		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
+	xpt_action(done_ccb);
+
+out:
+	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
+	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
+}
+
+/*
+ * Generic Async Event handler.  Peripheral drivers usually
+ * filter out the events that require personal attention,
+ * and leave the rest to this function.
+ */
+void
+cam_periph_async(struct cam_periph *periph, u_int32_t code,
+		 struct cam_path *path, void *arg)
+{
+	switch (code) {
+	case AC_LOST_DEVICE:
+		/* Device disappeared: start tearing this periph down. */
+		cam_periph_invalidate(periph);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Freeze this periph's device queue for up to bus_settle milliseconds
+ * measured from the last bus reset, as reported by XPT_GDEV_STATS.
+ */
+void
+cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
+{
+	struct ccb_getdevstats cgds;
+
+	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
+	cgds.ccb_h.func_code = XPT_GDEV_STATS;
+	xpt_action((union ccb *)&cgds);
+	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
+}
+
+/*
+ * If less than duration_ms has elapsed since *event_time, freeze the
+ * device queue and schedule its release for the remainder of the
+ * window.  An unset event_time is ignored.
+ */
+void
+cam_periph_freeze_after_event(struct cam_periph *periph,
+			      struct timeval* event_time, u_int duration_ms)
+{
+	struct timeval delta;
+	struct timeval duration_tv;
+
+	if (!timevalisset(event_time))
+		return;
+
+	microtime(&delta);
+	timevalsub(&delta, event_time);
+	duration_tv.tv_sec = duration_ms / 1000;
+	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
+	if (timevalcmp(&delta, &duration_tv, <)) {
+		timevalsub(&duration_tv, &delta);
+
+		/* Remaining time in the settle window, in milliseconds. */
+		duration_ms = duration_tv.tv_sec * 1000;
+		duration_ms += duration_tv.tv_usec / 1000;
+		cam_freeze_devq(periph->path);
+		cam_release_devq(periph->path,
+				RELSIM_RELEASE_AFTER_TIMEOUT,
+				/*reduction*/0,
+				/*timeout*/duration_ms,
+				/*getcount_only*/0);
+	}
+
+}
+
+/*
+ * Decode the SCSI status byte of a failed CCB and choose a recovery
+ * action for cam_periph_error().
+ *
+ * Returns 0 (success-like status), ERESTART (requeue the command) or
+ * an errno.  May adjust *openings, *relsim_flags, *timeout, *action
+ * and *action_string to describe the chosen recovery.
+ */
+static int
+camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
+    cam_flags camflags, u_int32_t sense_flags,
+    int *openings, u_int32_t *relsim_flags,
+    u_int32_t *timeout, u_int32_t *action, const char **action_string)
+{
+#ifndef __rtems__
+	int error;
+
+	switch (ccb->csio.scsi_status) {
+	case SCSI_STATUS_OK:
+	case SCSI_STATUS_COND_MET:
+	case SCSI_STATUS_INTERMED:
+	case SCSI_STATUS_INTERMED_COND_MET:
+		error = 0;
+		break;
+	case SCSI_STATUS_CMD_TERMINATED:
+	case SCSI_STATUS_CHECK_COND:
+		/* Sense data should be available; defer to the decoder. */
+		error = camperiphscsisenseerror(ccb, orig_ccb,
+					        camflags,
+					        sense_flags,
+					        openings,
+					        relsim_flags,
+					        timeout,
+					        action,
+					        action_string);
+		break;
+	case SCSI_STATUS_QUEUE_FULL:
+	{
+		/* no decrement */
+		struct ccb_getdevstats cgds;
+
+		/*
+		 * First off, find out what the current
+		 * transaction counts are.
+		 */
+		xpt_setup_ccb(&cgds.ccb_h,
+			      ccb->ccb_h.path,
+			      CAM_PRIORITY_NORMAL);
+		cgds.ccb_h.func_code = XPT_GDEV_STATS;
+		xpt_action((union ccb *)&cgds);
+
+		/*
+		 * If we were the only transaction active, treat
+		 * the QUEUE FULL as if it were a BUSY condition.
+		 */
+		if (cgds.dev_active != 0) {
+			int total_openings;
+
+			/*
+			 * Reduce the number of openings to
+			 * be 1 less than the amount it took
+			 * to get a queue full bounded by the
+			 * minimum allowed tag count for this
+			 * device.
+			 */
+			total_openings = cgds.dev_active + cgds.dev_openings;
+			*openings = cgds.dev_active;
+			if (*openings < cgds.mintags)
+				*openings = cgds.mintags;
+			if (*openings < total_openings)
+				*relsim_flags = RELSIM_ADJUST_OPENINGS;
+			else {
+				/*
+				 * Some devices report queue full for
+				 * temporary resource shortages.  For
+				 * this reason, we allow a minimum
+				 * tag count to be entered via a
+				 * quirk entry to prevent the queue
+				 * count on these devices from falling
+				 * to a pessimistically low value.  We
+				 * still wait for the next successful
+				 * completion, however, before queueing
+				 * more transactions to the device.
+				 */
+				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
+			}
+			*timeout = 0;
+			error = ERESTART;
+			*action &= ~SSQ_PRINT_SENSE;
+			break;
+		}
+		/* FALLTHROUGH */
+	}
+	case SCSI_STATUS_BUSY:
+		/*
+		 * Restart the queue after either another
+		 * command completes or a 1 second timeout.
+		 */
+		if ((sense_flags & SF_RETRY_BUSY) != 0 ||
+		    (ccb->ccb_h.retry_count--) > 0) {
+			error = ERESTART;
+			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
+				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
+			*timeout = 1000;
+		} else {
+			error = EIO;
+		}
+		break;
+	case SCSI_STATUS_RESERV_CONFLICT:
+	default:
+		error = EIO;
+		break;
+	}
+	return (error);
+#else /* __rtems__ */
+	/*
+	 * NOTE(review): the RTEMS build maps every SCSI status error to
+	 * success (0) here -- confirm this is intended for the MMC-only
+	 * port, since callers will then treat failed commands as complete.
+	 */
+	return 0;
+#endif /* __rtems__ */
+}
+#ifndef __rtems__
+/*
+ * Decode the sense data of a failed SCSI command and choose a
+ * recovery action (retry, start unit, test unit ready, fail, ...).
+ * Only one recovery action may be in progress per periph; errors that
+ * arrive while recovery is active are requeued with ERESTART.
+ *
+ * Returns 0, ERESTART or an errno.  When a recovery CCB is
+ * substituted for the caller's command, a copy of the original CCB is
+ * allocated and returned through *orig (freed later by camperiphdone()).
+ */
+static int
+camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
+    cam_flags camflags, u_int32_t sense_flags,
+    int *openings, u_int32_t *relsim_flags,
+    u_int32_t *timeout, u_int32_t *action, const char **action_string)
+{
+	struct cam_periph *periph;
+	union ccb *orig_ccb = ccb;
+	int error, recoveryccb;
+
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
+		biotrack(ccb->csio.bio, __func__);
+#endif
+
+	periph = xpt_path_periph(ccb->ccb_h.path);
+	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
+	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
+		/*
+		 * If error recovery is already in progress, don't attempt
+		 * to process this error, but requeue it unconditionally
+		 * and attempt to process it once error recovery has
+		 * completed.  This failed command is probably related to
+		 * the error that caused the currently active error recovery
+		 * action so our current recovery efforts should also
+		 * address this command.  Be aware that the error recovery
+		 * code assumes that only one recovery action is in progress
+		 * on a particular peripheral instance at any given time
+		 * (e.g. only one saved CCB for error recovery) so it is
+		 * imperative that we don't violate this assumption.
+		 */
+		error = ERESTART;
+		*action &= ~SSQ_PRINT_SENSE;
+	} else {
+		scsi_sense_action err_action;
+		struct ccb_getdev cgd;
+
+		/*
+		 * Grab the inquiry data for this device.
+		 */
+		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
+		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
+		xpt_action((union ccb *)&cgd);
+
+		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
+		    sense_flags);
+		error = err_action & SS_ERRMASK;
+
+		/*
+		 * Do not autostart sequential access devices
+		 * to avoid unexpected tape loading.
+		 */
+		if ((err_action & SS_MASK) == SS_START &&
+		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
+			*action_string = "Will not autostart a "
+			    "sequential access device";
+			goto sense_error_done;
+		}
+
+		/*
+		 * Avoid recovery recursion if recovery action is the same.
+		 */
+		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
+			if (((err_action & SS_MASK) == SS_START &&
+			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
+			    ((err_action & SS_MASK) == SS_TUR &&
+			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
+				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
+				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
+				*timeout = 500;
+			}
+		}
+
+		/*
+		 * If the recovery action will consume a retry,
+		 * make sure we actually have retries available.
+		 */
+		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
+			if (ccb->ccb_h.retry_count > 0 &&
+			    (periph->flags & CAM_PERIPH_INVALID) == 0)
+				ccb->ccb_h.retry_count--;
+			else {
+				*action_string = "Retries exhausted";
+				goto sense_error_done;
+			}
+		}
+
+		if ((err_action & SS_MASK) >= SS_START) {
+			/*
+			 * Do common portions of commands that
+			 * use recovery CCBs.
+			 */
+			orig_ccb = xpt_alloc_ccb_nowait();
+			if (orig_ccb == NULL) {
+				*action_string = "Can't allocate recovery CCB";
+				goto sense_error_done;
+			}
+			/*
+			 * Clear freeze flag for original request here, as
+			 * this freeze will be dropped as part of ERESTART.
+			 */
+			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
+			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
+		}
+
+		switch (err_action & SS_MASK) {
+		case SS_NOP:
+			*action_string = "No recovery action needed";
+			error = 0;
+			break;
+		case SS_RETRY:
+			*action_string = "Retrying command (per sense data)";
+			error = ERESTART;
+			break;
+		case SS_FAIL:
+			*action_string = "Unretryable error";
+			break;
+		case SS_START:
+		{
+			int le;
+
+			/*
+			 * Send a start unit command to the device, and
+			 * then retry the command.
+			 */
+			*action_string = "Attempting to start unit";
+			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
+
+			/*
+			 * Check for removable media and set
+			 * load/eject flag appropriately.
+			 */
+			if (SID_IS_REMOVABLE(&cgd.inq_data))
+				le = TRUE;
+			else
+				le = FALSE;
+
+			scsi_start_stop(&ccb->csio,
+					/*retries*/1,
+					camperiphdone,
+					MSG_SIMPLE_Q_TAG,
+					/*start*/TRUE,
+					/*load/eject*/le,
+					/*immediate*/FALSE,
+					SSD_FULL_SIZE,
+					/*timeout*/50000);
+			break;
+		}
+		case SS_TUR:
+		{
+			/*
+			 * Send a Test Unit Ready to the device.
+			 * If the 'many' flag is set, we send 120
+			 * test unit ready commands, one every half
+			 * second.  Otherwise, we just send one TUR.
+			 * We only want to do this if the retry
+			 * count has not been exhausted.
+			 */
+			int retries;
+
+			if ((err_action & SSQ_MANY) != 0) {
+				*action_string = "Polling device for readiness";
+				retries = 120;
+			} else {
+				*action_string = "Testing device for readiness";
+				retries = 1;
+			}
+			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
+			scsi_test_unit_ready(&ccb->csio,
+					     retries,
+					     camperiphdone,
+					     MSG_SIMPLE_Q_TAG,
+					     SSD_FULL_SIZE,
+					     /*timeout*/5000);
+
+			/*
+			 * Accomplish our 500ms delay by deferring
+			 * the release of our device queue appropriately.
+			 */
+			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
+			*timeout = 500;
+			break;
+		}
+		default:
+			panic("Unhandled error action %x", err_action);
+		}
+
+		if ((err_action & SS_MASK) >= SS_START) {
+			/*
+			 * Drop the priority, so that the recovery
+			 * CCB is the first to execute.  Freeze the queue
+			 * after this command is sent so that we can
+			 * restore the old csio and have it queued in
+			 * the proper order before we release normal
+			 * transactions to the device.
+			 */
+			ccb->ccb_h.pinfo.priority--;
+			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
+			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
+			error = ERESTART;
+			*orig = orig_ccb;
+		}
+
+sense_error_done:
+		*action = err_action;
+	}
+	return (error);
+}
+#endif
+/*
+ * Generic error handler.  Peripheral drivers usually filter
+ * out the errors that they handle in a unique manner, then
+ * call this function.
+ *
+ * Returns 0 if the command can be treated as complete, ERESTART if it
+ * has been requeued, or an errno on permanent failure.  Also performs
+ * devctl notification, lost-device teardown, unit-attention broadcast
+ * and target rescans requested by the status/sense decoders.
+ */
+int
+cam_periph_error(union ccb *ccb, cam_flags camflags,
+		 u_int32_t sense_flags)
+{
+	struct cam_path *newpath;
+	union ccb *orig_ccb, *scan_ccb;
+	struct cam_periph *periph;
+	const char *action_string;
+	cam_status status;
+	int frozen, error, openings, devctl_err;
+	u_int32_t action, relsim_flags, timeout;
+
+	action = SSQ_PRINT_SENSE;
+	periph = xpt_path_periph(ccb->ccb_h.path);
+	action_string = NULL;
+	status = ccb->ccb_h.status;
+	frozen = (status & CAM_DEV_QFRZN) != 0;
+	status &= CAM_STATUS_MASK;
+	devctl_err = openings = relsim_flags = timeout = 0;
+	orig_ccb = ccb;
+
+	/* Filter the errors that should be reported via devctl */
+	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
+	case CAM_CMD_TIMEOUT:
+	case CAM_REQ_ABORTED:
+	case CAM_REQ_CMP_ERR:
+	case CAM_REQ_TERMIO:
+	case CAM_UNREC_HBA_ERROR:
+	case CAM_DATA_RUN_ERR:
+	case CAM_SCSI_STATUS_ERROR:
+	case CAM_ATA_STATUS_ERROR:
+	case CAM_SMP_STATUS_ERROR:
+		devctl_err++;
+		break;
+	default:
+		break;
+	}
+
+	switch (status) {
+	case CAM_REQ_CMP:
+		error = 0;
+		action &= ~SSQ_PRINT_SENSE;
+		break;
+	case CAM_SCSI_STATUS_ERROR:
+		error = camperiphscsistatuserror(ccb, &orig_ccb,
+		    camflags, sense_flags, &openings, &relsim_flags,
+		    &timeout, &action, &action_string);
+		break;
+	case CAM_AUTOSENSE_FAIL:
+		error = EIO;	/* we have to kill the command */
+		break;
+	case CAM_UA_ABORT:
+	case CAM_UA_TERMIO:
+	case CAM_MSG_REJECT_REC:
+		/* XXX Don't know that these are correct */
+		error = EIO;
+		break;
+	case CAM_SEL_TIMEOUT:
+		if ((camflags & CAM_RETRY_SELTO) != 0) {
+			if (ccb->ccb_h.retry_count > 0 &&
+			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
+				ccb->ccb_h.retry_count--;
+				error = ERESTART;
+
+				/*
+				 * Wait a bit to give the device
+				 * time to recover before we try again.
+				 */
+				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
+				timeout = periph_selto_delay;
+				break;
+			}
+			action_string = "Retries exhausted";
+		}
+		/* FALLTHROUGH */
+	case CAM_DEV_NOT_THERE:
+		error = ENXIO;
+		action = SSQ_LOST;
+		break;
+	case CAM_REQ_INVALID:
+	case CAM_PATH_INVALID:
+	case CAM_NO_HBA:
+	case CAM_PROVIDE_FAIL:
+	case CAM_REQ_TOO_BIG:
+	case CAM_LUN_INVALID:
+	case CAM_TID_INVALID:
+	case CAM_FUNC_NOTAVAIL:
+		error = EINVAL;
+		break;
+	case CAM_SCSI_BUS_RESET:
+	case CAM_BDR_SENT:
+		/*
+		 * Commands that repeatedly timeout and cause these
+		 * kinds of error recovery actions, should return
+		 * CAM_CMD_TIMEOUT, which allows us to safely assume
+		 * that this command was an innocent bystander to
+		 * these events and should be unconditionally
+		 * retried.
+		 */
+	case CAM_REQUEUE_REQ:
+		/* Unconditional requeue if device is still there */
+		if (periph->flags & CAM_PERIPH_INVALID) {
+			action_string = "Periph was invalidated";
+			error = EIO;
+		} else if (sense_flags & SF_NO_RETRY) {
+			error = EIO;
+			action_string = "Retry was blocked";
+		} else {
+			error = ERESTART;
+			action &= ~SSQ_PRINT_SENSE;
+		}
+		break;
+	case CAM_RESRC_UNAVAIL:
+		/* Wait a bit for the resource shortage to abate. */
+		timeout = periph_noresrc_delay;
+		/* FALLTHROUGH */
+	case CAM_BUSY:
+		if (timeout == 0) {
+			/* Wait a bit for the busy condition to abate. */
+			timeout = periph_busy_delay;
+		}
+		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
+		/* FALLTHROUGH */
+	case CAM_ATA_STATUS_ERROR:
+	case CAM_REQ_CMP_ERR:
+	case CAM_CMD_TIMEOUT:
+	case CAM_UNEXP_BUSFREE:
+	case CAM_UNCOR_PARITY:
+	case CAM_DATA_RUN_ERR:
+	default:
+		if (periph->flags & CAM_PERIPH_INVALID) {
+			error = EIO;
+			action_string = "Periph was invalidated";
+		} else if (ccb->ccb_h.retry_count == 0) {
+			error = EIO;
+			action_string = "Retries exhausted";
+		} else if (sense_flags & SF_NO_RETRY) {
+			error = EIO;
+			action_string = "Retry was blocked";
+		} else {
+			ccb->ccb_h.retry_count--;
+			error = ERESTART;
+		}
+		break;
+	}
+
+	if ((sense_flags & SF_PRINT_ALWAYS) ||
+	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
+		action |= SSQ_PRINT_SENSE;
+	else if (sense_flags & SF_NO_PRINT)
+		action &= ~SSQ_PRINT_SENSE;
+#ifndef __rtems__
+	/*
+	 * The whole statement must live inside the #ifndef: guarding only
+	 * the cam_error_print() call left a dangling "if" on RTEMS builds
+	 * that silently captured the following statement as its body.
+	 */
+	if ((action & SSQ_PRINT_SENSE) != 0)
+		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
+#endif /* __rtems__ */
+	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
+		if (error != ERESTART) {
+			if (action_string == NULL)
+				action_string = "Unretryable error";
+			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
+			    error, action_string);
+		} else if (action_string != NULL)
+			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
+		else
+			xpt_print(ccb->ccb_h.path, "Retrying command\n");
+	}
+
+	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
+		cam_periph_devctl_notify(orig_ccb);
+
+	if ((action & SSQ_LOST) != 0) {
+		lun_id_t lun_id;
+
+		/*
+		 * For a selection timeout, we consider all of the LUNs on
+		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
+		 * then we only get rid of the device(s) specified by the
+		 * path in the original CCB.
+		 */
+		if (status == CAM_SEL_TIMEOUT)
+			lun_id = CAM_LUN_WILDCARD;
+		else
+			lun_id = xpt_path_lun_id(ccb->ccb_h.path);
+
+		/* Should we do more if we can't create the path?? */
+		if (xpt_create_path(&newpath, periph,
+				    xpt_path_path_id(ccb->ccb_h.path),
+				    xpt_path_target_id(ccb->ccb_h.path),
+				    lun_id) == CAM_REQ_CMP) {
+
+			/*
+			 * Let peripheral drivers know that this
+			 * device has gone away.
+			 */
+			xpt_async(AC_LOST_DEVICE, newpath, NULL);
+			xpt_free_path(newpath);
+		}
+	}
+
+	/* Broadcast UNIT ATTENTIONs to all periphs. */
+	if ((action & SSQ_UA) != 0)
+		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
+
+	/* Rescan target on "Reported LUNs data has changed" */
+	if ((action & SSQ_RESCAN) != 0) {
+		if (xpt_create_path(&newpath, NULL,
+				    xpt_path_path_id(ccb->ccb_h.path),
+				    xpt_path_target_id(ccb->ccb_h.path),
+				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
+
+			scan_ccb = xpt_alloc_ccb_nowait();
+			if (scan_ccb != NULL) {
+				scan_ccb->ccb_h.path = newpath;
+				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
+				scan_ccb->crcn.flags = 0;
+				xpt_rescan(scan_ccb);
+			} else {
+				xpt_print(newpath,
+				    "Can't allocate CCB to rescan target\n");
+				xpt_free_path(newpath);
+			}
+		}
+	}
+
+	/* Attempt a retry */
+	if (error == ERESTART || error == 0) {
+		if (frozen != 0)
+			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
+		if (error == ERESTART)
+			xpt_action(ccb);
+		if (frozen != 0)
+			cam_release_devq(ccb->ccb_h.path,
+					 relsim_flags,
+					 openings,
+					 timeout,
+					 /*getcount_only*/0);
+	}
+
+	return (error);
+}
+
+#define CAM_PERIPH_DEVD_MSG_SIZE 256
+
+/*
+ * Emit a devctl(4) notification describing a failed CCB: periph name
+ * and unit, device serial number, CAM status and, on non-RTEMS
+ * builds, SCSI sense / ATA result and the CDB/ACB.  Best effort --
+ * returns silently if the message buffer cannot be allocated.
+ */
+static void
+cam_periph_devctl_notify(union ccb *ccb)
+{
+	struct cam_periph *periph;
+	struct ccb_getdev *cgd;
+	struct sbuf sb;
+	int serr, sk, asc, ascq;	/* only consumed in non-RTEMS builds */
+	char *sbmsg, *type;
+
+	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
+	if (sbmsg == NULL)
+		return;
+
+	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
+
+	periph = xpt_path_periph(ccb->ccb_h.path);
+	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
+	    periph->unit_number);
+
+	sbuf_printf(&sb, "serial=\"");
+	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
+		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
+		    CAM_PRIORITY_NORMAL);
+		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
+		xpt_action((union ccb *)cgd);
+
+		if (cgd->ccb_h.status == CAM_REQ_CMP)
+			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
+		xpt_free_ccb((union ccb *)cgd);
+	}
+	sbuf_printf(&sb, "\" ");
+	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
+
+	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
+	case CAM_CMD_TIMEOUT:
+		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
+		type = "timeout";
+		break;
+	case CAM_SCSI_STATUS_ERROR:
+#ifndef __rtems__
+		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
+		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
+			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
+			    serr, sk, asc, ascq);
+#endif /* __rtems__ */
+		type = "error";
+		break;
+	case CAM_ATA_STATUS_ERROR:
+#ifndef __rtems__
+		sbuf_printf(&sb, "RES=\"");
+		ata_res_sbuf(&ccb->ataio.res, &sb);
+		sbuf_printf(&sb, "\" ");
+#endif /* __rtems__ */
+		type = "error";
+		break;
+	default:
+		type = "error";
+		break;
+	}
+
+	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
+		sbuf_printf(&sb, "CDB=\"");
+#ifndef __rtems__
+		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
+#endif /* __rtems__ */
+		sbuf_printf(&sb, "\" ");
+	} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
+		sbuf_printf(&sb, "ACB=\"");
+#ifndef __rtems__
+		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
+#endif /* __rtems__ */
+		sbuf_printf(&sb, "\" ");
+	}
+
+	if (sbuf_finish(&sb) == 0)
+		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
+	sbuf_delete(&sb);
+	free(sbmsg, M_CAMPERIPH);
+}
+
diff --git a/freebsd/sys/cam/cam_periph.h b/freebsd/sys/cam/cam_periph.h
index 87f153c..e20a777 100644
--- a/freebsd/sys/cam/cam_periph.h
+++ b/freebsd/sys/cam/cam_periph.h
@@ -195,7 +195,7 @@ void cam_periph_freeze_after_event(struct cam_periph *periph,
struct timeval* event_time,
u_int duration_ms);
int cam_periph_error(union ccb *ccb, cam_flags camflags,
- u_int32_t sense_flags, union ccb *save_ccb);
+ u_int32_t sense_flags);
static __inline struct mtx *
cam_periph_mtx(struct cam_periph *periph)
diff --git a/freebsd/sys/cam/cam_queue.c b/freebsd/sys/cam/cam_queue.c
new file mode 100644
index 0000000..247d03e
--- /dev/null
+++ b/freebsd/sys/cam/cam_queue.c
@@ -0,0 +1,399 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * CAM request queue management functions.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_queue.h>
+#include <cam/cam_debug.h>
+
+static MALLOC_DEFINE(M_CAMQ, "CAM queue", "CAM queue buffers");
+static MALLOC_DEFINE(M_CAMDEVQ, "CAM dev queue", "CAM dev queue buffers");
+static MALLOC_DEFINE(M_CAMCCBQ, "CAM ccb queue", "CAM ccb queue buffers");
+
+static __inline int
+ queue_cmp(cam_pinfo **queue_array, int i, int j);
+static __inline void
+ swap(cam_pinfo **queue_array, int i, int j);
+static void heap_up(cam_pinfo **queue_array, int new_index);
+static void heap_down(cam_pinfo **queue_array, int index,
+ int last_index);
+
+struct camq *
+camq_alloc(int size)
+{
+ struct camq *camq;
+ camq = (struct camq *)malloc(sizeof(*camq), M_CAMQ, M_NOWAIT);
+ if (camq != NULL) {
+ if (camq_init(camq, size) != 0) {
+ free(camq, M_CAMQ);
+ camq = NULL;
+ }
+ }
+ return (camq);
+}
+
+int
+camq_init(struct camq *camq, int size)
+{
+ bzero(camq, sizeof(*camq));
+ camq->array_size = size;
+ if (camq->array_size != 0) {
+ camq->queue_array = (cam_pinfo**)malloc(size*sizeof(cam_pinfo*),
+ M_CAMQ, M_NOWAIT);
+ if (camq->queue_array == NULL) {
+ printf("camq_init: - cannot malloc array!\n");
+ return (1);
+ }
+ /*
+ * Heap algorithms like everything numbered from 1, so
+ * offset our pointer into the heap array by one element.
+ */
+ camq->queue_array--;
+ }
+ return (0);
+}
+
+/*
+ * Free a camq structure. This should only be called if a controller
+ * driver fails somehow during its attach routine or is unloaded and has
+ * obtained a camq structure. The XPT should ensure that the queue
+ * is empty before calling this routine.
+ */
+void
+camq_free(struct camq *queue)
+{
+ if (queue != NULL) {
+ camq_fini(queue);
+ free(queue, M_CAMQ);
+ }
+}
+
+void
+camq_fini(struct camq *queue)
+{
+ if (queue->queue_array != NULL) {
+ /*
+ * Heap algorithms like everything numbered from 1, so
+ * our pointer into the heap array is offset by one element.
+ */
+ queue->queue_array++;
+ free(queue->queue_array, M_CAMQ);
+ }
+}
+
+u_int32_t
+camq_resize(struct camq *queue, int new_size)
+{
+ cam_pinfo **new_array;
+ KASSERT(new_size >= queue->entries, ("camq_resize: "
+ "New queue size can't accommodate queued entries (%d < %d).",
+ new_size, queue->entries));
+ new_array = (cam_pinfo **)malloc(new_size * sizeof(cam_pinfo *),
+ M_CAMQ, M_NOWAIT);
+ if (new_array == NULL) {
+ /* Couldn't satisfy request */
+ return (CAM_RESRC_UNAVAIL);
+ }
+ /*
+ * Heap algorithms like everything numbered from 1, so
+ * remember that our pointer into the heap array is offset
+ * by one element.
+ */
+ if (queue->queue_array != NULL) {
+ queue->queue_array++;
+ bcopy(queue->queue_array, new_array,
+ queue->entries * sizeof(cam_pinfo *));
+ free(queue->queue_array, M_CAMQ);
+ }
+ queue->queue_array = new_array-1;
+ queue->array_size = new_size;
+ return (CAM_REQ_CMP);
+}
+
+/*
+ * camq_insert: Given an array of cam_pinfo* elements with
+ * the Heap(1, num_elements) property and array_size - num_elements >= 1,
+ * output Heap(1, num_elements+1) including new_entry in the array.
+ */
+void
+camq_insert(struct camq *queue, cam_pinfo *new_entry)
+{
+ KASSERT(queue->entries < queue->array_size,
+ ("camq_insert: Attempt to insert into a full queue (%d >= %d)",
+ queue->entries, queue->array_size));
+ queue->entries++;
+ queue->queue_array[queue->entries] = new_entry;
+ new_entry->index = queue->entries;
+ if (queue->entries != 0)
+ heap_up(queue->queue_array, queue->entries);
+}
+
+/*
+ * camq_remove: Given an array of cam_pinfo* elements with the
+ * Heap(1, num_elements) property and an index such that 1 <= index <=
+ * num_elements, remove that entry and restore the Heap(1, num_elements-1)
+ * property.
+ */
+cam_pinfo *
+camq_remove(struct camq *queue, int index)
+{
+ cam_pinfo *removed_entry;
+ if (index <= 0 || index > queue->entries)
+ panic("%s: Attempt to remove out-of-bounds index %d "
+ "from queue %p of size %d", __func__, index, queue,
+ queue->entries);
+
+ removed_entry = queue->queue_array[index];
+ if (queue->entries != index) {
+ queue->queue_array[index] = queue->queue_array[queue->entries];
+ queue->queue_array[index]->index = index;
+ heap_down(queue->queue_array, index, queue->entries - 1);
+ }
+ removed_entry->index = CAM_UNQUEUED_INDEX;
+ queue->entries--;
+ return (removed_entry);
+}
+
+/*
+ * camq_change_priority: Given an array of cam_pinfo* elements with the
+ * Heap(1, num_elements) property, an index such that 1 <= index <= num_elements,
+ * and a new priority for the element at index, change the priority of
+ * element index and restore the Heap(1, num_elements) property.
+ */
+void
+camq_change_priority(struct camq *queue, int index, u_int32_t new_priority)
+{
+ if (new_priority > queue->queue_array[index]->priority) {
+ queue->queue_array[index]->priority = new_priority;
+ heap_down(queue->queue_array, index, queue->entries);
+ } else {
+ /* new_priority <= old_priority */
+ queue->queue_array[index]->priority = new_priority;
+ heap_up(queue->queue_array, index);
+ }
+}
+
+struct cam_devq *
+cam_devq_alloc(int devices, int openings)
+{
+ struct cam_devq *devq;
+ devq = (struct cam_devq *)malloc(sizeof(*devq), M_CAMDEVQ, M_NOWAIT);
+ if (devq == NULL) {
+ printf("cam_devq_alloc: - cannot malloc!\n");
+ return (NULL);
+ }
+ if (cam_devq_init(devq, devices, openings) != 0) {
+ free(devq, M_CAMDEVQ);
+ return (NULL);
+ }
+ return (devq);
+}
+
+int
+cam_devq_init(struct cam_devq *devq, int devices, int openings)
+{
+ bzero(devq, sizeof(*devq));
+ mtx_init(&devq->send_mtx, "CAM queue lock", NULL, MTX_DEF);
+ if (camq_init(&devq->send_queue, devices) != 0)
+ return (1);
+ devq->send_openings = openings;
+ devq->send_active = 0;
+ return (0);
+}
+
+void
+cam_devq_free(struct cam_devq *devq)
+{
+ camq_fini(&devq->send_queue);
+ mtx_destroy(&devq->send_mtx);
+ free(devq, M_CAMDEVQ);
+}
+
+u_int32_t
+cam_devq_resize(struct cam_devq *camq, int devices)
+{
+ u_int32_t retval;
+ retval = camq_resize(&camq->send_queue, devices);
+ return (retval);
+}
+
+struct cam_ccbq *
+cam_ccbq_alloc(int openings)
+{
+ struct cam_ccbq *ccbq;
+ ccbq = (struct cam_ccbq *)malloc(sizeof(*ccbq), M_CAMCCBQ, M_NOWAIT);
+ if (ccbq == NULL) {
+ printf("cam_ccbq_alloc: - cannot malloc!\n");
+ return (NULL);
+ }
+ if (cam_ccbq_init(ccbq, openings) != 0) {
+ free(ccbq, M_CAMCCBQ);
+ return (NULL);
+ }
+
+ return (ccbq);
+}
+
+void
+cam_ccbq_free(struct cam_ccbq *ccbq)
+{
+ if (ccbq) {
+ cam_ccbq_fini(ccbq);
+ free(ccbq, M_CAMCCBQ);
+ }
+}
+
+u_int32_t
+cam_ccbq_resize(struct cam_ccbq *ccbq, int new_size)
+{
+ int delta;
+ delta = new_size - (ccbq->dev_active + ccbq->dev_openings);
+ ccbq->total_openings += delta;
+ ccbq->dev_openings += delta;
+
+ new_size = imax(64, 1 << fls(new_size + new_size / 2));
+ if (new_size > ccbq->queue.array_size)
+ return (camq_resize(&ccbq->queue, new_size));
+ else
+ return (CAM_REQ_CMP);
+}
+
+int
+cam_ccbq_init(struct cam_ccbq *ccbq, int openings)
+{
+ bzero(ccbq, sizeof(*ccbq));
+ if (camq_init(&ccbq->queue,
+ imax(64, 1 << fls(openings + openings / 2))) != 0)
+ return (1);
+ ccbq->total_openings = openings;
+ ccbq->dev_openings = openings;
+ return (0);
+}
+
+void
+cam_ccbq_fini(struct cam_ccbq *ccbq)
+{
+ camq_fini(&ccbq->queue);
+}
+
+/*
+ * Heap routines for manipulating CAM queues.
+ */
+/*
+ * queue_cmp: Given an array of cam_pinfo* elements and indexes i
+ * and j, return less than 0, 0, or greater than 0 if i is less than,
+ * equal to, or greater than j respectively.
+ */
+static __inline int
+queue_cmp(cam_pinfo **queue_array, int i, int j)
+{
+ if (queue_array[i]->priority == queue_array[j]->priority)
+ return ( queue_array[i]->generation
+ - queue_array[j]->generation );
+ else
+ return ( queue_array[i]->priority
+ - queue_array[j]->priority );
+}
+
+/*
+ * swap: Given an array of cam_pinfo* elements and indexes i and j,
+ * exchange elements i and j.
+ */
+static __inline void
+swap(cam_pinfo **queue_array, int i, int j)
+{
+ cam_pinfo *temp_qentry;
+
+ temp_qentry = queue_array[j];
+ queue_array[j] = queue_array[i];
+ queue_array[i] = temp_qentry;
+ queue_array[j]->index = j;
+ queue_array[i]->index = i;
+}
+
+/*
+ * heap_up: Given an array of cam_pinfo* elements with the
+ * Heap(1, new_index-1) property and a new element in location
+ * new_index, output Heap(1, new_index).
+ */
+static void
+heap_up(cam_pinfo **queue_array, int new_index)
+{
+ int child;
+ int parent;
+
+ child = new_index;
+
+ while (child != 1) {
+
+ parent = child >> 1;
+ if (queue_cmp(queue_array, parent, child) <= 0)
+ break;
+ swap(queue_array, parent, child);
+ child = parent;
+ }
+}
+
+/*
+ * heap_down: Given an array of cam_pinfo* elements with the
+ * Heap(index + 1, num_entries) property with index containing
+ * an unsorted entry, output Heap(index, num_entries).
+ */
+static void
+heap_down(cam_pinfo **queue_array, int index, int num_entries)
+{
+ int child;
+ int parent;
+
+ parent = index;
+ child = parent << 1;
+ for (; child <= num_entries; child = parent << 1) {
+
+ if (child < num_entries) {
+ /* child+1 is the right child of parent */
+ if (queue_cmp(queue_array, child + 1, child) < 0)
+ child++;
+ }
+ /* child is now the least child of parent */
+ if (queue_cmp(queue_array, parent, child) <= 0)
+ break;
+ swap(queue_array, child, parent);
+ parent = child;
+ }
+}
diff --git a/freebsd/sys/cam/cam_queue.h b/freebsd/sys/cam/cam_queue.h
new file mode 100644
index 0000000..455590a
--- /dev/null
+++ b/freebsd/sys/cam/cam_queue.h
@@ -0,0 +1,291 @@
+/*-
+ * CAM request queue management definitions.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_QUEUE_H
+#define _CAM_CAM_QUEUE_H 1
+
+#ifdef _KERNEL
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <cam/cam.h>
+
+/*
+ * This structure implements a heap based priority queue. The queue
+ * assumes that the objects stored in it begin with a cam_qentry
+ * structure holding the priority information used to sort the objects.
+ * This structure is opaque to clients (outside of the XPT layer) to allow
+ * the implementation to change without affecting them.
+ */
+struct camq {
+ cam_pinfo **queue_array;
+ int array_size;
+ int entries;
+ u_int32_t generation;
+ u_int32_t qfrozen_cnt;
+};
+
+TAILQ_HEAD(ccb_hdr_tailq, ccb_hdr);
+LIST_HEAD(ccb_hdr_list, ccb_hdr);
+SLIST_HEAD(ccb_hdr_slist, ccb_hdr);
+
+struct cam_ccbq {
+ struct camq queue;
+ struct ccb_hdr_tailq queue_extra_head;
+ int queue_extra_entries;
+ int total_openings;
+ int allocated;
+ int dev_openings;
+ int dev_active;
+};
+
+struct cam_ed;
+
+struct cam_devq {
+ struct mtx send_mtx;
+ struct camq send_queue;
+ int send_openings;
+ int send_active;
+};
+
+
+struct cam_devq *cam_devq_alloc(int devices, int openings);
+
+int cam_devq_init(struct cam_devq *devq, int devices,
+ int openings);
+
+void cam_devq_free(struct cam_devq *devq);
+
+u_int32_t cam_devq_resize(struct cam_devq *camq, int openings);
+
+/*
+ * Allocate a cam_ccb_queue structure and initialize it.
+ */
+struct cam_ccbq *cam_ccbq_alloc(int openings);
+
+u_int32_t cam_ccbq_resize(struct cam_ccbq *ccbq, int devices);
+
+int cam_ccbq_init(struct cam_ccbq *ccbq, int openings);
+
+void cam_ccbq_free(struct cam_ccbq *ccbq);
+
+void cam_ccbq_fini(struct cam_ccbq *ccbq);
+
+/*
+ * Allocate and initialize a cam_queue structure.
+ */
+struct camq *camq_alloc(int size);
+
+/*
+ * Resize a cam queue
+ */
+u_int32_t camq_resize(struct camq *queue, int new_size);
+
+/*
+ * Initialize a camq structure. Return 0 on success, 1 on failure.
+ */
+int camq_init(struct camq *camq, int size);
+
+/*
+ * Free a cam_queue structure. This should only be called if a controller
+ * driver fails somehow during its attach routine or is unloaded and has
+ * obtained a cam_queue structure.
+ */
+void camq_free(struct camq *queue);
+
+/*
+ * Finalize any internal storage or state of a cam_queue.
+ */
+void camq_fini(struct camq *queue);
+
+/*
+ * camq_insert: Given a CAM queue with at least one open spot,
+ * insert the new entry maintaining order.
+ */
+void camq_insert(struct camq *queue, cam_pinfo *new_entry);
+
+/*
+ * camq_remove: Remove an arbitrary entry from the queue maintaining
+ * queue order.
+ */
+cam_pinfo *camq_remove(struct camq *queue, int index);
+#define CAMQ_HEAD 1 /* Head of queue index */
+
+/* Index the first element in the heap */
+#define CAMQ_GET_HEAD(camq) ((camq)->queue_array[CAMQ_HEAD])
+
+/* Get the first element priority. */
+#define CAMQ_GET_PRIO(camq) (((camq)->entries > 0) ? \
+ ((camq)->queue_array[CAMQ_HEAD]->priority) : 0)
+
+/*
+ * camq_change_priority: Raise or lower the priority of an entry
+ * maintaining queue order.
+ */
+void camq_change_priority(struct camq *queue, int index,
+ u_int32_t new_priority);
+
+static __inline int
+cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq);
+
+static __inline void
+cam_ccbq_take_opening(struct cam_ccbq *ccbq);
+
+static __inline void
+cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb);
+
+static __inline void
+cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb);
+
+static __inline union ccb *
+cam_ccbq_peek_ccb(struct cam_ccbq *ccbq, int index);
+
+static __inline void
+cam_ccbq_send_ccb(struct cam_ccbq *queue, union ccb *send_ccb);
+
+static __inline void
+cam_ccbq_ccb_done(struct cam_ccbq *ccbq, union ccb *done_ccb);
+
+static __inline void
+cam_ccbq_release_opening(struct cam_ccbq *ccbq);
+
+
+static __inline int
+cam_ccbq_pending_ccb_count(struct cam_ccbq *ccbq)
+{
+ return (ccbq->queue.entries + ccbq->queue_extra_entries);
+}
+
+static __inline void
+cam_ccbq_take_opening(struct cam_ccbq *ccbq)
+{
+ ccbq->allocated++;
+}
+
+static __inline void
+cam_ccbq_insert_ccb(struct cam_ccbq *ccbq, union ccb *new_ccb)
+{
+	struct ccb_hdr *old_ccb;
+	struct camq *queue = &ccbq->queue;
+
+	KASSERT((new_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0 &&
+	    (new_ccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0,
+	    ("%s: Cannot queue ccb %p func_code %#x", __func__, new_ccb,
+	    new_ccb->ccb_h.func_code));
+
+	/*
+	 * If the queue is already full, try to resize it. If the resize
+	 * fails, push the lowest-priority CCB out to the overflow TAILQ.
+	 */
+	if (queue->entries == queue->array_size &&
+	    camq_resize(&ccbq->queue, queue->array_size * 2) != CAM_REQ_CMP) {
+		old_ccb = (struct ccb_hdr *)camq_remove(queue, queue->entries);
+		TAILQ_INSERT_HEAD(&ccbq->queue_extra_head, old_ccb,
+		    xpt_links.tqe);
+		old_ccb->pinfo.index = CAM_EXTRAQ_INDEX;
+		ccbq->queue_extra_entries++;
+	}
+
+	camq_insert(queue, &new_ccb->ccb_h.pinfo);
+}
+
+static __inline void
+cam_ccbq_remove_ccb(struct cam_ccbq *ccbq, union ccb *ccb)
+{
+ struct ccb_hdr *cccb, *bccb;
+ struct camq *queue = &ccbq->queue;
+ cam_pinfo *removed_entry __unused;