From 31e8ab2869a71aac5cec8e7ccd323dfd1bcdc057 Mon Sep 17 00:00:00 2001 From: Philip J Perry Date: Sat, 14 Jan 2023 21:42:18 +0000 Subject: [PATCH] mptsas: add mptctl driver to kmod-mptsas package kmod-mptsas now ships all the Fusion MPT drivers in a single kmod package merging kmod-mptspi and kmod-mptfc, and adding the mptctl module Signed-off-by: Philip J Perry --- mptsas-kmod/el8/Makefile | 5 +- mptsas-kmod/el8/mptctl.c | 3068 ++++++++++++++++++++++++++++++ mptsas-kmod/el8/mptctl.h | 467 +++++ mptsas-kmod/el8/mptfc.c | 1554 +++++++++++++++ mptsas-kmod/el8/mptlan.c | 1538 +++++++++++++++ mptsas-kmod/el8/mptlan.h | 129 ++ mptsas-kmod/el8/mptsas-kmod.spec | 18 +- mptsas-kmod/el9/Makefile | 1 + mptsas-kmod/el9/kmod-mptsas.spec | 7 +- 9 files changed, 6783 insertions(+), 4 deletions(-) create mode 100644 mptsas-kmod/el8/mptctl.c create mode 100644 mptsas-kmod/el8/mptctl.h create mode 100644 mptsas-kmod/el8/mptfc.c create mode 100644 mptsas-kmod/el8/mptlan.c create mode 100644 mptsas-kmod/el8/mptlan.h diff --git a/mptsas-kmod/el8/Makefile b/mptsas-kmod/el8/Makefile index 9a857479..ac5d5eae 100644 --- a/mptsas-kmod/el8/Makefile +++ b/mptsas-kmod/el8/Makefile @@ -28,6 +28,9 @@ clean: else -obj-m += mptbase.o mptscsih.o mptsas.o +obj-m += mptbase.o mptscsih.o mptspi.o +obj-m += mptbase.o mptscsih.o mptfc.o +obj-m += mptbase.o mptscsih.o mptsas.o +obj-m += mptctl.o endif diff --git a/mptsas-kmod/el8/mptctl.c b/mptsas-kmod/el8/mptctl.c new file mode 100644 index 00000000..4470630d --- /dev/null +++ b/mptsas-kmod/el8/mptctl.c @@ -0,0 +1,3068 @@ +/* + * linux/drivers/message/fusion/mptctl.c + * mpt Ioctl driver. + * For use with LSI PCI chip/adapters + * running LSI Fusion MPT (Message Passing Technology) firmware. + * + * Copyright (c) 1999-2008 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. 
+ + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include +#include +#include +#include +#include +#include +#include +#include /* for mdelay */ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation" +#define MODULEAUTHOR "LSI Corporation" +#include "mptbase.h" +#include "mptctl.h" + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +#define my_NAME "Fusion MPT misc device (ioctl) driver" +#define my_VERSION MPT_LINUX_VERSION_COMMON +#define MYNAM "mptctl" + +MODULE_AUTHOR(MODULEAUTHOR); +MODULE_DESCRIPTION(my_NAME); +MODULE_LICENSE("GPL"); +MODULE_VERSION(my_VERSION); + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +static DEFINE_MUTEX(mpctl_mutex); +static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; +static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS; + +static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +struct buflist { + u8 *kptr; + int len; +}; + +/* + * Function prototypes. Called from OS entry point mptctl_ioctl. + * arg contents specific to function. + */ +static int mptctl_fw_download(unsigned long arg); +static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd); +static int mptctl_gettargetinfo(unsigned long arg); +static int mptctl_readtest(unsigned long arg); +static int mptctl_mpt_command(unsigned long arg); +static int mptctl_eventquery(unsigned long arg); +static int mptctl_eventenable(unsigned long arg); +static int mptctl_eventreport(unsigned long arg); +static int mptctl_replace_fw(unsigned long arg); + +static int mptctl_do_reset(unsigned long arg); +static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd); +static int mptctl_hp_targetinfo(unsigned long arg); + +static int mptctl_probe(struct pci_dev *, const struct pci_device_id *); +static void mptctl_remove(struct pci_dev *); + +#ifdef CONFIG_COMPAT +static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg); +#endif +/* + * Private function calls. 
+ */ +static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr); +static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen); +static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags, + struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); +static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, + struct buflist *buflist, MPT_ADAPTER *ioc); + +/* + * Reset Handler cleanup function + */ +static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); + +/* + * Event Handler function + */ +static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); +static struct fasync_struct *async_queue=NULL; + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Scatter gather list (SGL) sizes and limits... + */ +//#define MAX_SCSI_FRAGS 9 +#define MAX_FRAGS_SPILL1 9 +#define MAX_FRAGS_SPILL2 15 +#define FRAGS_PER_BUCKET (MAX_FRAGS_SPILL2 + 1) + +//#define MAX_CHAIN_FRAGS 64 +//#define MAX_CHAIN_FRAGS (15+15+15+16) +#define MAX_CHAIN_FRAGS (4 * MAX_FRAGS_SPILL2 + 1) + +// Define max sg LIST bytes ( == (#frags + #chains) * 8 bytes each) +// Works out to: 592d bytes! (9+1)*8 + 4*(15+1)*8 +// ^----------------- 80 + 512 +#define MAX_SGL_BYTES ((MAX_FRAGS_SPILL1 + 1 + (4 * FRAGS_PER_BUCKET)) * 8) + +/* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */ +#define MAX_KMALLOC_SZ (128*1024) + +#define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptctl_syscall_down - Down the MPT adapter syscall semaphore. + * @ioc: Pointer to MPT adapter + * @nonblock: boolean, non-zero if O_NONBLOCK is set + * + * All of the ioctl commands can potentially sleep, which is illegal + * with a spinlock held, thus we perform mutual exclusion here. + * + * Returns negative errno on error, or zero for success. + */ +static inline int +mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) +{ + int rc = 0; + + if (nonblock) { + if (!mutex_trylock(&ioc->ioctl_cmds.mutex)) + rc = -EAGAIN; + } else { + if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex)) + rc = -ERESTARTSYS; + } + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This is the callback for any message we have posted. The message itself + * will be returned to the message pool when we return from the IRQ + * + * This runs in irq context so be short and sweet. + */ +static int +mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) +{ + char *sense_data; + int req_index; + int sz; + + if (!req) + return 0; + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function " + "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function, + req, reply)); + + /* + * Handling continuation of the same reply. Processing the first + * reply, and eating the other replys that come later. 
+ */ + if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext) + goto out_continuation; + + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + + if (!reply) + goto out; + + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength); + memcpy(ioc->ioctl_cmds.reply, reply, sz); + + if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo) + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name, + le16_to_cpu(reply->u.reply.IOCStatus), + le32_to_cpu(reply->u.reply.IOCLogInfo))); + + if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) || + (req->u.hdr.Function == + MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + + if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "scsi_status (0x%02x), scsi_state (0x%02x), " + "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name, + reply->u.sreply.SCSIStatus, + reply->u.sreply.SCSIState, + le16_to_cpu(reply->u.sreply.TaskTag), + le32_to_cpu(reply->u.sreply.TransferCount))); + + if (reply->u.sreply.SCSIState & + MPI_SCSI_STATE_AUTOSENSE_VALID) { + sz = req->u.scsireq.SenseBufferLength; + req_index = + le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); + sense_data = ((u8 *)ioc->sense_buf_pool + + (req_index * MPT_SENSE_BUFFER_ALLOC)); + memcpy(ioc->ioctl_cmds.sense, sense_data, sz); + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID; + } + } + + out: + /* We are done, issue wake up + */ + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { + if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) { + mpt_clear_taskmgmt_in_progress_flag(ioc); + ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->ioctl_cmds.done); + if (ioc->bus_type == SAS) + ioc->schedule_target_reset(ioc); + } else { + ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->ioctl_cmds.done); + } + } + + out_continuation: + if (reply && (reply->u.reply.MsgFlags & + MPI_MSGFLAGS_CONTINUATION_REPLY)) + return 0; + return 1; +} + + +static int +mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +{ + if (!mf) + return 0; + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt completed (mf=%p, mr=%p)\n", + ioc->name, mf, mr)); + + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + + if (!mr) + goto out; + + ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; + memcpy(ioc->taskmgmt_cmds.reply, mr, + min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); + out: + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { + mpt_clear_taskmgmt_in_progress_flag(ioc); + ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; + complete(&ioc->taskmgmt_cmds.done); + if (ioc->bus_type == SAS) + ioc->schedule_target_reset(ioc); + return 1; + } + return 0; +} + +static int +mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id) +{ + MPT_FRAME_HDR *mf; + SCSITaskMgmt_t *pScsiTm; + SCSITaskMgmtReply_t *pScsiTmReply; + int ii; + int retval; + unsigned long timeout; + unsigned long time_count; + u16 iocstatus; + + + mutex_lock(&ioc->taskmgmt_cmds.mutex); + if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + return -EPERM; + } + + retval = 0; + + mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc); + if (mf == NULL) { + dtmprintk(ioc, + printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n", + ioc->name)); + mpt_clear_taskmgmt_in_progress_flag(ioc); + retval = -ENOMEM; + goto tm_done; + } + + dtmprintk(ioc, 
printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", + ioc->name, mf)); + + pScsiTm = (SCSITaskMgmt_t *) mf; + memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t)); + pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + pScsiTm->TaskType = tm_type; + if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) && + (ioc->bus_type == FC)) + pScsiTm->MsgFlags = + MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; + pScsiTm->TargetID = target_id; + pScsiTm->Bus = bus_id; + pScsiTm->ChainOffset = 0; + pScsiTm->Reserved = 0; + pScsiTm->Reserved1 = 0; + pScsiTm->TaskMsgContext = 0; + for (ii= 0; ii < 8; ii++) + pScsiTm->LUN[ii] = 0; + for (ii=0; ii < 7; ii++) + pScsiTm->Reserved2[ii] = 0; + + switch (ioc->bus_type) { + case FC: + timeout = 40; + break; + case SAS: + timeout = 30; + break; + case SPI: + default: + timeout = 10; + break; + } + + dtmprintk(ioc, + printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n", + ioc->name, tm_type, timeout)); + + INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) + time_count = jiffies; + if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && + (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) + mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf); + else { + retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc, + sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP); + if (retval != 0) { + dfailprintk(ioc, + printk(MYIOC_s_ERR_FMT + "TaskMgmt send_handshake FAILED!" + " (ioc %p, mf %p, rc=%d) \n", ioc->name, + ioc, mf, retval)); + mpt_free_msg_frame(ioc, mf); + mpt_clear_taskmgmt_in_progress_flag(ioc); + goto tm_done; + } + } + + /* Now wait for the command to complete */ + ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ); + + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt failed\n", ioc->name)); + mpt_free_msg_frame(ioc, mf); + mpt_clear_taskmgmt_in_progress_flag(ioc); + if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) + retval = 0; + else + retval = -1; /* return failure */ + goto tm_done; + } + + if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt failed\n", ioc->name)); + retval = -1; /* return failure */ + goto tm_done; + } + + pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, " + "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, " + "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus, + pScsiTmReply->TargetID, tm_type, + le16_to_cpu(pScsiTmReply->IOCStatus), + le32_to_cpu(pScsiTmReply->IOCLogInfo), + pScsiTmReply->ResponseCode, + le32_to_cpu(pScsiTmReply->TerminationCount))); + + iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; + + if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || + iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED || + iocstatus == MPI_IOCSTATUS_SUCCESS) + retval = 0; + else { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TaskMgmt failed\n", ioc->name)); + retval = -1; /* return failure */ + } + + tm_done: + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) + return retval; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_timeout_expired + * + * Expecting an interrupt, however timed out. 
+ * + */ +static void +mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) +{ + unsigned long flags; + int ret_val = -1; + SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf; + u8 function = mf->u.hdr.Function; + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n", + ioc->name, __func__)); + + if (mpt_fwfault_debug) + mpt_halt_firmware(ioc); + + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) + mpt_free_msg_frame(ioc, mf); + return; + } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + + + CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) + + if (ioc->bus_type == SAS) { + if (function == MPI_FUNCTION_SCSI_IO_REQUEST) + ret_val = mptctl_do_taskmgmt(ioc, + MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + scsi_req->Bus, scsi_req->TargetID); + else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) + ret_val = mptctl_do_taskmgmt(ioc, + MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + scsi_req->Bus, 0); + if (!ret_val) + return; + } else { + if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) || + (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) + ret_val = mptctl_do_taskmgmt(ioc, + MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + scsi_req->Bus, 0); + if (!ret_val) + return; + } + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n", + ioc->name)); + mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); + mpt_free_msg_frame(ioc, mf); +} + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_ioc_reset + * + * Clean-up functionality. Used only if there has been a + * reload of the FW due. + * + */ +static int +mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + switch(reset_phase) { + case MPT_IOC_SETUP_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); + break; + case MPT_IOC_PRE_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); + break; + case MPT_IOC_POST_RESET: + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { + ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET; + complete(&ioc->ioctl_cmds.done); + } + break; + default: + break; + } + + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* ASYNC Event Notification Support */ +static int +mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) +{ + u8 event; + + event = le32_to_cpu(pEvReply->Event) & 0xFF; + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n", + ioc->name, __func__)); + if(async_queue == NULL) + return 1; + + /* Raise SIGIO for persistent events. 
+ * TODO - this define is not in MPI spec yet, + * but they plan to set it to 0x21 + */ + if (event == 0x21 ) { + ioc->aen_event_read_flag=1; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Raised SIGIO to application\n", + ioc->name)); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Raised SIGIO to application\n", ioc->name)); + kill_fasync(&async_queue, SIGIO, POLL_IN); + return 1; + } + + /* This flag is set after SIGIO was raised, and + * remains set until the application has read + * the event log via ioctl=MPTEVENTREPORT + */ + if(ioc->aen_event_read_flag) + return 1; + + /* Signal only for the events that are + * requested for by the application + */ + if (ioc->events && (ioc->eventTypes & ( 1 << event))) { + ioc->aen_event_read_flag=1; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Raised SIGIO to application\n", ioc->name)); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Raised SIGIO to application\n", ioc->name)); + kill_fasync(&async_queue, SIGIO, POLL_IN); + } + return 1; +} + +static int +mptctl_fasync(int fd, struct file *filep, int mode) +{ + MPT_ADAPTER *ioc; + int ret; + + mutex_lock(&mpctl_mutex); + list_for_each_entry(ioc, &ioc_list, list) + ioc->aen_event_read_flag=0; + + ret = fasync_helper(fd, filep, mode, &async_queue); + mutex_unlock(&mpctl_mutex); + return ret; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * MPT ioctl handler + * cmd - specify the particular IOCTL command to be issued + * arg - data specific to the command. Must not be null. + */ +static long +__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + mpt_ioctl_header __user *uhdr = (void __user *) arg; + mpt_ioctl_header khdr; + int iocnum; + unsigned iocnumX; + int nonblock = (file->f_flags & O_NONBLOCK); + int ret; + MPT_ADAPTER *iocp = NULL; + + if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { + printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - " + "Unable to copy mpt_ioctl_header data @ %p\n", + __FILE__, __LINE__, uhdr); + return -EFAULT; + } + ret = -ENXIO; /* (-6) No such device or address */ + + /* Verify intended MPT adapter - set iocnum and the adapter + * pointer (iocp) + */ + iocnumX = khdr.iocnum & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || + (iocp == NULL)) + return -ENODEV; + + if (!iocp->active) { + printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n", + __FILE__, __LINE__); + return -EFAULT; + } + + /* Handle those commands that are just returning + * information stored in the driver. + * These commands should never time out and are unaffected + * by TM and FW reloads. + */ + if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { + return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); + } else if (cmd == MPTTARGETINFO) { + return mptctl_gettargetinfo(arg); + } else if (cmd == MPTTEST) { + return mptctl_readtest(arg); + } else if (cmd == MPTEVENTQUERY) { + return mptctl_eventquery(arg); + } else if (cmd == MPTEVENTENABLE) { + return mptctl_eventenable(arg); + } else if (cmd == MPTEVENTREPORT) { + return mptctl_eventreport(arg); + } else if (cmd == MPTFWREPLACE) { + return mptctl_replace_fw(arg); + } + + /* All of these commands require an interrupt or + * are unknown/illegal. 
+ */ + if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) + return ret; + + if (cmd == MPTFWDOWNLOAD) + ret = mptctl_fw_download(arg); + else if (cmd == MPTCOMMAND) + ret = mptctl_mpt_command(arg); + else if (cmd == MPTHARDRESET) + ret = mptctl_do_reset(arg); + else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) + ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); + else if (cmd == HP_GETTARGETINFO) + ret = mptctl_hp_targetinfo(arg); + else + ret = -EINVAL; + + mutex_unlock(&iocp->ioctl_cmds.mutex); + + return ret; +} + +static long +mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + mutex_lock(&mpctl_mutex); + ret = __mptctl_ioctl(file, cmd, arg); + mutex_unlock(&mpctl_mutex); + return ret; +} + +static int mptctl_do_reset(unsigned long arg) +{ + struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; + struct mpt_ioctl_diag_reset krinfo; + MPT_ADAPTER *iocp; + + if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " + "Unable to copy mpt_ioctl_diag_reset struct @ %p\n", + __FILE__, __LINE__, urinfo); + return -EFAULT; + } + + if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { + printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", + __FILE__, __LINE__, krinfo.hdr.iocnum); + return -ENODEV; /* (-6) No such device or address */ + } + + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", + iocp->name)); + + if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) { + printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n", + iocp->name, __FILE__, __LINE__); + return -1; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * MPT FW download function. Cast the arg into the mpt_fw_xfer structure. + * This structure contains: iocnum, firmware length (bytes), + * pointer to user space memory where the fw image is stored. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENXIO if no such device + * -EAGAIN if resource problem + * -ENOMEM if no memory for SGE + * -EMLINK if too many chain buffers required + * -EBADRQC if adapter does not support FW download + * -EBUSY if adapter is busy + * -ENOMSG if FW upload returned bad status + */ +static int +mptctl_fw_download(unsigned long arg) +{ + struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; + struct mpt_fw_xfer kfwdl; + + if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) { + printk(KERN_ERR MYNAM "%s@%d::_ioctl_fwdl - " + "Unable to copy mpt_fw_xfer struct @ %p\n", + __FILE__, __LINE__, ufwdl); + return -EFAULT; + } + + return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * FW Download engine. + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENXIO if no such device + * -EAGAIN if resource problem + * -ENOMEM if no memory for SGE + * -EMLINK if too many chain buffers required + * -EBADRQC if adapter does not support FW download + * -EBUSY if adapter is busy + * -ENOMSG if FW upload returned bad status + */ +static int +mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) +{ + FWDownload_t *dlmsg; + MPT_FRAME_HDR *mf; + MPT_ADAPTER *iocp; + FWDownloadTCSGE_t *ptsge; + MptSge_t *sgl, *sgIn; + char *sgOut; + struct buflist *buflist; + struct buflist *bl; + dma_addr_t sgl_dma; + int ret; + int numfrags = 0; + int maxfrags; + int n = 0; + u32 sgdir; + u32 nib; + int fw_bytes_copied = 0; + int i; + int sge_offset = 0; + u16 iocstat; + pFWDownloadReply_t ReplyMsg = NULL; + unsigned long timeleft; + + if (mpt_verify_adapter(ioc, &iocp) < 0) { + printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", + ioc); + return -ENODEV; /* (-6) No such device or address */ + } else { + + /* Valid device. Get a message frame and construct the FW download message. + */ + if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) + return -EAGAIN; + } + + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT + "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n", + iocp->name, ufwbuf)); + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", + iocp->name, (int)fwlen)); + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", + iocp->name, ioc)); + + dlmsg = (FWDownload_t*) mf; + ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; + sgOut = (char *) (ptsge + 1); + + /* + * Construct f/w download request + */ + dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW; + dlmsg->Reserved = 0; + dlmsg->ChainOffset = 0; + dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; + dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; + if (iocp->facts.MsgVersion >= MPI_VERSION_01_05) + dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; + else + dlmsg->MsgFlags = 0; + + + /* Set up the Transaction SGE. + */ + ptsge->Reserved = 0; + ptsge->ContextSize = 0; + ptsge->DetailsLength = 12; + ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; + ptsge->Reserved_0100_Checksum = 0; + ptsge->ImageOffset = 0; + ptsge->ImageSize = cpu_to_le32(fwlen); + + /* Add the SGL + */ + + /* + * Need to kmalloc area(s) for holding firmware image bytes. + * But we need to do it piece meal, using a proper + * scatter gather list (with 128kB MAX hunks). + * + * A practical limit here might be # of sg hunks that fit into + * a single IOC request frame; 12 or 8 (see below), so: + * For FC9xx: 12 x 128kB == 1.5 mB (max) + * For C1030: 8 x 128kB == 1 mB (max) + * We could support chaining, but things get ugly(ier:) + * + * Set the sge_offset to the start of the sgl (bytes). + */ + sgdir = 0x04000000; /* IOC will READ from sys mem */ + sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t); + if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset, + &numfrags, &buflist, &sgl_dma, iocp)) == NULL) + return -ENOMEM; + + /* + * We should only need SGL with 2 simple_32bit entries (up to 256 kB) + * for FC9xx f/w image, but calculate max number of sge hunks + * we can fit into a request frame, and limit ourselves to that. 
+ * (currently no chain support) + * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE + * Request maxfrags + * 128 12 + * 96 8 + * 64 4 + */ + maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - + sizeof(FWDownloadTCSGE_t)) + / iocp->SGE_size; + if (numfrags > maxfrags) { + ret = -EMLINK; + goto fwdl_out; + } + + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n", + iocp->name, sgl, numfrags)); + + /* + * Parse SG list, copying sgl itself, + * plus f/w image hunks from user space as we go... + */ + ret = -EFAULT; + sgIn = sgl; + bl = buflist; + for (i=0; i < numfrags; i++) { + + /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE + * Skip everything but Simple. If simple, copy from + * user space into kernel space. + * Note: we should not have anything but Simple as + * Chain SGE are illegal. + */ + nib = (sgIn->FlagsLength & 0x30000000) >> 28; + if (nib == 0 || nib == 3) { + ; + } else if (sgIn->Address) { + iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); + n++; + if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { + printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " + "Unable to copy f/w buffer hunk#%d @ %p\n", + iocp->name, __FILE__, __LINE__, n, ufwbuf); + goto fwdl_out; + } + fw_bytes_copied += bl->len; + } + sgIn++; + bl++; + sgOut += iocp->SGE_size; + } + + DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); + + /* + * Finally, perform firmware download. + */ + ReplyMsg = NULL; + SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext); + INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status) + mpt_put_msg_frame(mptctl_id, iocp, mf); + + /* Now wait for the command to complete */ +retry_wait: + timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60); + if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + ret = -ETIME; + printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); + if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { + mpt_free_msg_frame(iocp, mf); + goto fwdl_out; + } + if (!timeleft) { + printk(MYIOC_s_WARN_FMT + "FW download timeout, doorbell=0x%08x\n", + iocp->name, mpt_GetIocState(iocp, 0)); + mptctl_timeout_expired(iocp, mf); + } else + goto retry_wait; + goto fwdl_out; + } + + if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { + printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); + mpt_free_msg_frame(iocp, mf); + ret = -ENODATA; + goto fwdl_out; + } + + if (sgl) + kfree_sgl(sgl, sgl_dma, buflist, iocp); + + ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; + iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; + if (iocstat == MPI_IOCSTATUS_SUCCESS) { + printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name); + return 0; + } else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) { + printk(MYIOC_s_WARN_FMT "Hmmm... 
F/W download not supported!?!\n", + iocp->name); + printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n", + iocp->name); + return -EBADRQC; + } else if (iocstat == MPI_IOCSTATUS_BUSY) { + printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name); + printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name); + return -EBUSY; + } else { + printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n", + iocp->name, iocstat); + printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name); + return -ENOMSG; + } + return 0; + +fwdl_out: + + CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status); + SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0); + kfree_sgl(sgl, sgl_dma, buflist, iocp); + return ret; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * SGE Allocation routine + * + * Inputs: bytes - number of bytes to be transferred + * sgdir - data direction + * sge_offset - offset (in bytes) from the start of the request + * frame to the first SGE + * ioc - pointer to the mptadapter + * Outputs: frags - number of scatter gather elements + * blp - point to the buflist pointer + * sglbuf_dma - pointer to the (dma) sgl + * Returns: Null if failes + * pointer to the (virtual) sgl if successful. + */ +static MptSge_t * +kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, + struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc) +{ + MptSge_t *sglbuf = NULL; /* pointer to array of SGE */ + /* and chain buffers */ + struct buflist *buflist = NULL; /* kernel routine */ + MptSge_t *sgl; + int numfrags = 0; + int fragcnt = 0; + int alloc_sz = min(bytes,MAX_KMALLOC_SZ); // avoid kernel warning msg! + int bytes_allocd = 0; + int this_alloc; + dma_addr_t pa; // phys addr + int i, buflist_ent; + int sg_spill = MAX_FRAGS_SPILL1; + int dir; + + if (bytes < 0) + return NULL; + + /* initialization */ + *frags = 0; + *blp = NULL; + + /* Allocate and initialize an array of kernel + * structures for the SG elements. + */ + i = MAX_SGL_BYTES / 8; + buflist = kzalloc(i, GFP_USER); + if (!buflist) + return NULL; + buflist_ent = 0; + + /* Allocate a single block of memory to store the sg elements and + * the chain buffers. The calling routine is responsible for + * copying the data in this array into the correct place in the + * request and chain buffers. + */ + sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma); + if (sglbuf == NULL) + goto free_and_fail; + + if (sgdir & 0x04000000) + dir = PCI_DMA_TODEVICE; + else + dir = PCI_DMA_FROMDEVICE; + + /* At start: + * sgl = sglbuf = point to beginning of sg buffer + * buflist_ent = 0 = first kernel structure + * sg_spill = number of SGE that can be written before the first + * chain element. + * + */ + sgl = sglbuf; + sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1; + while (bytes_allocd < bytes) { + this_alloc = min(alloc_sz, bytes-bytes_allocd); + buflist[buflist_ent].len = this_alloc; + buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev, + this_alloc, + &pa); + if (buflist[buflist_ent].kptr == NULL) { + alloc_sz = alloc_sz / 2; + if (alloc_sz == 0) { + printk(MYIOC_s_WARN_FMT "-SG: No can do - " + "not enough memory! 
:-(\n", ioc->name); + printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n", + ioc->name, numfrags); + goto free_and_fail; + } + continue; + } else { + dma_addr_t dma_addr; + + bytes_allocd += this_alloc; + sgl->FlagsLength = (0x10000000|sgdir|this_alloc); + dma_addr = pci_map_single(ioc->pcidev, + buflist[buflist_ent].kptr, this_alloc, dir); + sgl->Address = dma_addr; + + fragcnt++; + numfrags++; + sgl++; + buflist_ent++; + } + + if (bytes_allocd >= bytes) + break; + + /* Need to chain? */ + if (fragcnt == sg_spill) { + printk(MYIOC_s_WARN_FMT + "-SG: No can do - " "Chain required! :-(\n", ioc->name); + printk(MYIOC_s_WARN_FMT "(freeing %d frags)\n", ioc->name, numfrags); + goto free_and_fail; + } + + /* overflow check... */ + if (numfrags*8 > MAX_SGL_BYTES){ + /* GRRRRR... */ + printk(MYIOC_s_WARN_FMT "-SG: No can do - " + "too many SG frags! :-(\n", ioc->name); + printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n", + ioc->name, numfrags); + goto free_and_fail; + } + } + + /* Last sge fixup: set LE+eol+eob bits */ + sgl[-1].FlagsLength |= 0xC1000000; + + *frags = numfrags; + *blp = buflist; + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - " + "%d SG frags generated!\n", ioc->name, numfrags)); + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - " + "last (big) alloc_sz=%d\n", ioc->name, alloc_sz)); + + return sglbuf; + +free_and_fail: + if (sglbuf != NULL) { + for (i = 0; i < numfrags; i++) { + dma_addr_t dma_addr; + u8 *kptr; + int len; + + if ((sglbuf[i].FlagsLength >> 24) == 0x30) + continue; + + dma_addr = sglbuf[i].Address; + kptr = buflist[i].kptr; + len = buflist[i].len; + + pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); + } + pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma); + } + kfree(buflist); + return NULL; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Routine to free the SGL elements. + */ +static void +kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc) +{ + MptSge_t *sg = sgl; + struct buflist *bl = buflist; + u32 nib; + int dir; + int n = 0; + + if (sg->FlagsLength & 0x04000000) + dir = PCI_DMA_TODEVICE; + else + dir = PCI_DMA_FROMDEVICE; + + nib = (sg->FlagsLength & 0xF0000000) >> 28; + while (! (nib & 0x4)) { /* eob */ + /* skip ignore/chain. */ + if (nib == 0 || nib == 3) { + ; + } else if (sg->Address) { + dma_addr_t dma_addr; + void *kptr; + int len; + + dma_addr = sg->Address; + kptr = bl->kptr; + len = bl->len; + pci_unmap_single(ioc->pcidev, dma_addr, len, dir); + pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); + n++; + } + sg++; + bl++; + nib = (le32_to_cpu(sg->FlagsLength) & 0xF0000000) >> 28; + } + + /* we're at eob! */ + if (sg->Address) { + dma_addr_t dma_addr; + void *kptr; + int len; + + dma_addr = sg->Address; + kptr = bl->kptr; + len = bl->len; + pci_unmap_single(ioc->pcidev, dma_addr, len, dir); + pci_free_consistent(ioc->pcidev, len, kptr, dma_addr); + n++; + } + + pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma); + kfree(buflist); + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n", + ioc->name, n)); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_getiocinfo - Query the host adapter for IOC information. + * @arg: User space argument + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_getiocinfo (unsigned long arg, unsigned int data_size) +{ + struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; + struct mpt_ioctl_iocinfo *karg; + MPT_ADAPTER *ioc; + struct pci_dev *pdev; + int iocnum; + unsigned int port; + int cim_rev; + struct scsi_device *sdev; + VirtDevice *vdevice; + + /* Add of PCI INFO results in unaligned access for + * IA64 and Sparc. Reset long to int. Return no PCI + * data for obsolete format. + */ + if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0)) + cim_rev = 0; + else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1)) + cim_rev = 1; + else if (data_size == sizeof(struct mpt_ioctl_iocinfo)) + cim_rev = 2; + else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12)) + cim_rev = 0; /* obsolete */ + else + return -EFAULT; + + karg = memdup_user(uarg, data_size); + if (IS_ERR(karg)) { + printk(KERN_ERR MYNAM "%s@%d::mpt_ioctl_iocinfo() - memdup_user returned error [%ld]\n", + __FILE__, __LINE__, PTR_ERR(karg)); + return PTR_ERR(karg); + } + + if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + kfree(karg); + return -ENODEV; + } + + /* Verify the data transfer size is correct. */ + if (karg->hdr.maxDataSize != data_size) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " + "Structure size mismatch. Command not completed.\n", + ioc->name, __FILE__, __LINE__); + kfree(karg); + return -EFAULT; + } + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_getiocinfo called.\n", + ioc->name)); + + /* Fill in the data and return the structure to the calling + * program + */ + if (ioc->bus_type == SAS) + karg->adapterType = MPT_IOCTL_INTERFACE_SAS; + else if (ioc->bus_type == FC) + karg->adapterType = MPT_IOCTL_INTERFACE_FC; + else + karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; + + if (karg->hdr.port > 1) { + kfree(karg); + return -EINVAL; + } + port = karg->hdr.port; + + karg->port = port; + pdev = (struct pci_dev *) ioc->pcidev; + + karg->pciId = pdev->device; + karg->hwRev = pdev->revision; + karg->subSystemDevice = pdev->subsystem_device; + karg->subSystemVendor = pdev->subsystem_vendor; + + if (cim_rev == 1) { + /* Get the PCI bus, device, and function numbers for the IOC + */ + karg->pciInfo.u.bits.busNumber = pdev->bus->number; + karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); + karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); + } else if (cim_rev == 2) { + /* Get the PCI bus, device, function and segment ID numbers + for the IOC */ + karg->pciInfo.u.bits.busNumber = pdev->bus->number; + karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); + karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); + karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); + } + + /* Get number of devices + */ + karg->numDevices = 0; + if (ioc->sh) { + shost_for_each_device(sdev, ioc->sh) { + vdevice = sdev->hostdata; + if (vdevice == NULL || vdevice->vtarget == NULL) + continue; + if (vdevice->vtarget->tflags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; + karg->numDevices++; + } + } + + /* Set the BIOS and FW Version + */ + karg->FWVersion = ioc->facts.FWVersion.Word; + karg->BIOSVersion = ioc->biosVersion; + + /* Set the Version Strings. 
+ */ + strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH); + karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0'; + + karg->busChangeEvent = 0; + karg->hostId = ioc->pfacts[port].PortSCSIID; + karg->rsvd[0] = karg->rsvd[1] = 0; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char __user *)arg, karg, data_size)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " + "Unable to write out mpt_ioctl_iocinfo struct @ %p\n", + ioc->name, __FILE__, __LINE__, uarg); + kfree(karg); + return -EFAULT; + } + + kfree(karg); + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_gettargetinfo - Query the host adapter for target information. + * @arg: User space argument + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_gettargetinfo (unsigned long arg) +{ + struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; + struct mpt_ioctl_targetinfo karg; + MPT_ADAPTER *ioc; + VirtDevice *vdevice; + char *pmem; + int *pdata; + int iocnum; + int numDevices = 0; + int lun; + int maxWordsLeft; + int numBytes; + u8 port; + struct scsi_device *sdev; + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - " + "Unable to read in mpt_ioctl_targetinfo struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", + ioc->name)); + /* Get the port number and set the maximum number of bytes + * in the returned structure. + * Ignore the port setting. + */ + numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); + maxWordsLeft = numBytes/sizeof(int); + port = karg.hdr.port; + + if (maxWordsLeft <= 0) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", + ioc->name, __FILE__, __LINE__); + return -ENOMEM; + } + + /* Fill in the data and return the structure to the calling + * program + */ + + /* struct mpt_ioctl_targetinfo does not contain sufficient space + * for the target structures so when the IOCTL is called, there is + * not sufficient stack space for the structure. Allocate memory, + * populate the memory, copy back to the user, then free memory. + * targetInfo format: + * bits 31-24: reserved + * 23-16: LUN + * 15- 8: Bus Number + * 7- 0: Target ID + */ + pmem = kzalloc(numBytes, GFP_KERNEL); + if (!pmem) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", + ioc->name, __FILE__, __LINE__); + return -ENOMEM; + } + pdata = (int *) pmem; + + /* Get number of devices + */ + if (ioc->sh){ + shost_for_each_device(sdev, ioc->sh) { + if (!maxWordsLeft) + continue; + vdevice = sdev->hostdata; + if (vdevice == NULL || vdevice->vtarget == NULL) + continue; + if (vdevice->vtarget->tflags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; + lun = (vdevice->vtarget->raidVolume) ? 
0x80 : vdevice->lun; + *pdata = (((u8)lun << 16) + (vdevice->vtarget->channel << 8) + + (vdevice->vtarget->id )); + pdata++; + numDevices++; + --maxWordsLeft; + } + } + karg.numDevices = numDevices; + + /* Copy part of the data from kernel memory to user memory + */ + if (copy_to_user((char __user *)arg, &karg, + sizeof(struct mpt_ioctl_targetinfo))) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - " + "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", + ioc->name, __FILE__, __LINE__, uarg); + kfree(pmem); + return -EFAULT; + } + + /* Copy the remaining data from kernel memory to user memory + */ + if (copy_to_user(uarg->targetInfo, pmem, numBytes)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - " + "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", + ioc->name, __FILE__, __LINE__, pdata); + kfree(pmem); + return -EFAULT; + } + + kfree(pmem); + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* MPT IOCTL Test function. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_readtest (unsigned long arg) +{ + struct mpt_ioctl_test __user *uarg = (void __user *) arg; + struct mpt_ioctl_test karg; + MPT_ADAPTER *ioc; + int iocnum; + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - " + "Unable to read in mpt_ioctl_test struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n", + ioc->name)); + /* Fill in the data and return the structure to the calling + * program + */ + +#ifdef MFCNT + karg.chip_type = ioc->mfcnt; +#else + karg.chip_type = ioc->pcidev->device; +#endif + strncpy (karg.name, ioc->name, MPT_MAX_NAME); + karg.name[MPT_MAX_NAME-1]='\0'; + strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH); + karg.product[MPT_PRODUCT_LENGTH-1]='\0'; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_readtest - " + "Unable to write out mpt_ioctl_test struct @ %p\n", + ioc->name, __FILE__, __LINE__, uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_eventquery - Query the host adapter for the event types + * that are being logged. + * @arg: User space argument + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_eventquery (unsigned long arg) +{ + struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; + struct mpt_ioctl_eventquery karg; + MPT_ADAPTER *ioc; + int iocnum; + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " + "Unable to read in mpt_ioctl_eventquery struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", + ioc->name)); + karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; + karg.eventTypes = ioc->eventTypes; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - " + "Unable to write out mpt_ioctl_eventquery struct @ %p\n", + ioc->name, __FILE__, __LINE__, uarg); + return -EFAULT; + } + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mptctl_eventenable (unsigned long arg) +{ + struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; + struct mpt_ioctl_eventenable karg; + MPT_ADAPTER *ioc; + int iocnum; + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " + "Unable to read in mpt_ioctl_eventenable struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", + ioc->name)); + if (ioc->events == NULL) { + /* Have not yet allocated memory - do so now. + */ + int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); + ioc->events = kzalloc(sz, GFP_KERNEL); + if (!ioc->events) { + printk(MYIOC_s_ERR_FMT + ": ERROR - Insufficient memory to add adapter!\n", + ioc->name); + return -ENOMEM; + } + ioc->alloc_total += sz; + + ioc->eventContext = 0; + } + + /* Update the IOC event logging flag. 
+ */ + ioc->eventTypes = karg.eventTypes; + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mptctl_eventreport (unsigned long arg) +{ + struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg; + struct mpt_ioctl_eventreport karg; + MPT_ADAPTER *ioc; + int iocnum; + int numBytes, maxEvents, max; + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_eventreport - " + "Unable to read in mpt_ioctl_eventreport struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n", + ioc->name)); + + numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); + maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); + + + max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents; + + /* If fewer than 1 event is requested, there must have + * been some type of error. + */ + if ((max < 1) || !ioc->events) + return -ENODATA; + + /* reset this flag so SIGIO can restart */ + ioc->aen_event_read_flag=0; + + /* Copy the data from kernel memory to user memory + */ + numBytes = max * sizeof(MPT_IOCTL_EVENTS); + if (copy_to_user(uarg->eventData, ioc->events, numBytes)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventreport - " + "Unable to write out mpt_ioctl_eventreport struct @ %p\n", + ioc->name, __FILE__, __LINE__, ioc->events); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mptctl_replace_fw (unsigned long arg) +{ + struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg; + struct mpt_ioctl_replace_fw karg; + MPT_ADAPTER *ioc; + int iocnum; + int newFwSize; + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_replace_fw - " + "Unable to read in mpt_ioctl_replace_fw struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n", + ioc->name)); + /* If caching FW, Free the old FW image + */ + if (ioc->cached_fw == NULL) + return 0; + + mpt_free_fw_memory(ioc); + + /* Allocate memory for the new FW image + */ + newFwSize = ALIGN(karg.newImageSize, 4); + + mpt_alloc_fw_memory(ioc, newFwSize); + if (ioc->cached_fw == NULL) + return -ENOMEM; + + /* Copy the data from user memory to kernel space + */ + if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_replace_fw - " + "Unable to read in mpt_ioctl_replace_fw image " + "@ %p\n", ioc->name, __FILE__, __LINE__, uarg); + mpt_free_fw_memory(ioc); + return -EFAULT; + } + + /* Update IOCFactsReply + */ + ioc->facts.FWImageSize = newFwSize; + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* MPT IOCTL MPTCOMMAND function. + * Cast the arg into the mpt_ioctl_mpt_command structure. + * + * Outputs: None. 
+ * Return: 0 if successful + * -EBUSY if previous command timeout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_mpt_command (unsigned long arg) +{ + struct mpt_ioctl_command __user *uarg = (void __user *) arg; + struct mpt_ioctl_command karg; + MPT_ADAPTER *ioc; + int iocnum; + int rc; + + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_mpt_command - " + "Unable to read in mpt_ioctl_command struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + rc = mptctl_do_mpt_command (karg, &uarg->MF); + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timeout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + * -EPERM if SCSI I/O and target is untagged + */ +static int +mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) +{ + MPT_ADAPTER *ioc; + MPT_FRAME_HDR *mf = NULL; + MPIHeader_t *hdr; + char *psge; + struct buflist bufIn; /* data In buffer */ + struct buflist bufOut; /* data Out buffer */ + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + int sgSize = 0; /* Num SG elements */ + int iocnum, flagsLength; + int sz, rc = 0; + int msgContext; + u16 req_idx; + ulong timeout; + unsigned long timeleft; + struct scsi_device *sdev; + unsigned long flags; + u8 function; + + /* bufIn and bufOut are used for user to kernel space transfers + */ + bufIn.kptr = bufOut.kptr = NULL; + bufIn.len = bufOut.len = 0; + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + if (ioc->ioc_reset_in_progress) { + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " + "Busy with diagnostic reset\n", __FILE__, __LINE__); + return -EBUSY; + } + spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + + /* Basic sanity checks to prevent underflows or integer overflows */ + if (karg.maxReplyBytes < 0 || + karg.dataInSize < 0 || + karg.dataOutSize < 0 || + karg.dataSgeOffset < 0 || + karg.maxSenseBytes < 0 || + karg.dataSgeOffset > ioc->req_sz / 4) + return -EINVAL; + + /* Verify that the final request frame will not be too large. + */ + sz = karg.dataSgeOffset * 4; + if (karg.dataInSize > 0) + sz += ioc->SGE_size; + if (karg.dataOutSize > 0) + sz += ioc->SGE_size; + + if (sz > ioc->req_sz) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "Request frame too large (%d) maximum (%d)\n", + ioc->name, __FILE__, __LINE__, sz, ioc->req_sz); + return -EFAULT; + } + + /* Get a free request frame and save the message context. 
+ */ + if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) + return -EAGAIN; + + hdr = (MPIHeader_t *) mf; + msgContext = le32_to_cpu(hdr->MsgContext); + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + + /* Copy the request frame + * Reset the saved message context. + * Request frame in user space + */ + if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "Unable to read MF from mpt_ioctl_command struct @ %p\n", + ioc->name, __FILE__, __LINE__, mfPtr); + function = -1; + rc = -EFAULT; + goto done_free_mem; + } + hdr->MsgContext = cpu_to_le32(msgContext); + function = hdr->Function; + + + /* Verify that this request is allowed. + */ + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", + ioc->name, hdr->Function, mf)); + + switch (function) { + case MPI_FUNCTION_IOC_FACTS: + case MPI_FUNCTION_PORT_FACTS: + karg.dataOutSize = karg.dataInSize = 0; + break; + + case MPI_FUNCTION_CONFIG: + { + Config_t *config_frame; + config_frame = (Config_t *)mf; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\ttype=0x%02x ext_type=0x%02x " + "number=0x%02x action=0x%02x\n", ioc->name, + config_frame->Header.PageType, + config_frame->ExtPageType, + config_frame->Header.PageNumber, + config_frame->Action)); + break; + } + + case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND: + case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND: + case MPI_FUNCTION_FW_UPLOAD: + case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: + case MPI_FUNCTION_FW_DOWNLOAD: + case MPI_FUNCTION_FC_PRIMITIVE_SEND: + case MPI_FUNCTION_TOOLBOX: + case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: + break; + + case MPI_FUNCTION_SCSI_IO_REQUEST: + if (ioc->sh) { + SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; + int qtag = MPI_SCSIIO_CONTROL_UNTAGGED; + int scsidir = 0; + int dataSize; + u32 id; + + id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus; + if (pScsiReq->TargetID > id) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "Target ID out of bounds. \n", + ioc->name, __FILE__, __LINE__); + rc = -ENODEV; + goto done_free_mem; + } + + if (pScsiReq->Bus >= ioc->number_of_buses) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "Target Bus out of bounds. \n", + ioc->name, __FILE__, __LINE__); + rc = -ENODEV; + goto done_free_mem; + } + + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; + pScsiReq->MsgFlags |= mpt_msg_flags(ioc); + + + /* verify that app has not requested + * more sense data than driver + * can provide, if so, reset this parameter + * set the sense buffer pointer low address + * update the control field to specify Q type + */ + if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + else + pScsiReq->SenseBufferLength = karg.maxSenseBytes; + + pScsiReq->SenseBufferLowAddr = + cpu_to_le32(ioc->sense_buf_low_dma + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + shost_for_each_device(sdev, ioc->sh) { + struct scsi_target *starget = scsi_target(sdev); + VirtTarget *vtarget = starget->hostdata; + + if (vtarget == NULL) + continue; + + if ((pScsiReq->TargetID == vtarget->id) && + (pScsiReq->Bus == vtarget->channel) && + (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) + qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; + } + + /* Have the IOCTL driver set the direction based + * on the dataOutSize (ordering issue with Sparc). 
+ */ + if (karg.dataOutSize > 0) { + scsidir = MPI_SCSIIO_CONTROL_WRITE; + dataSize = karg.dataOutSize; + } else { + scsidir = MPI_SCSIIO_CONTROL_READ; + dataSize = karg.dataInSize; + } + + pScsiReq->Control = cpu_to_le32(scsidir | qtag); + pScsiReq->DataLength = cpu_to_le32(dataSize); + + + } else { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + ioc->name, __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + + case MPI_FUNCTION_SMP_PASSTHROUGH: + /* Check mf->PassthruFlags to determine if + * transfer is ImmediateMode or not. + * Immediate mode returns data in the ReplyFrame. + * Else, we are sending request and response data + * in two SGLs at the end of the mf. + */ + break; + + case MPI_FUNCTION_SATA_PASSTHROUGH: + if (!ioc->sh) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + ioc->name, __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + + case MPI_FUNCTION_RAID_ACTION: + /* Just add a SGE + */ + break; + + case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + if (ioc->sh) { + SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; + int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; + int scsidir = MPI_SCSIIO_CONTROL_READ; + int dataSize; + + pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; + pScsiReq->MsgFlags |= mpt_msg_flags(ioc); + + + /* verify that app has not requested + * more sense data than driver + * can provide, if so, reset this parameter + * set the sense buffer pointer low address + * update the control field to specify Q type + */ + if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + else + pScsiReq->SenseBufferLength = karg.maxSenseBytes; + + pScsiReq->SenseBufferLowAddr = + cpu_to_le32(ioc->sense_buf_low_dma + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + /* All commands to physical devices are tagged + */ + + /* Have the IOCTL driver set the direction based + * on the dataOutSize (ordering issue with Sparc). + */ + if (karg.dataOutSize > 0) { + scsidir = MPI_SCSIIO_CONTROL_WRITE; + dataSize = karg.dataOutSize; + } else { + scsidir = MPI_SCSIIO_CONTROL_READ; + dataSize = karg.dataInSize; + } + + pScsiReq->Control = cpu_to_le32(scsidir | qtag); + pScsiReq->DataLength = cpu_to_le32(dataSize); + + } else { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + ioc->name, __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + + case MPI_FUNCTION_SCSI_TASK_MGMT: + { + SCSITaskMgmt_t *pScsiTm; + pScsiTm = (SCSITaskMgmt_t *)mf; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "\tTaskType=0x%x MsgFlags=0x%x " + "TaskMsgContext=0x%x id=%d channel=%d\n", + ioc->name, pScsiTm->TaskType, le32_to_cpu + (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags, + pScsiTm->TargetID, pScsiTm->Bus)); + break; + } + + case MPI_FUNCTION_IOC_INIT: + { + IOCInit_t *pInit = (IOCInit_t *) mf; + u32 high_addr, sense_high; + + /* Verify that all entries in the IOC INIT match + * existing setup (and in LE format). 
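+ * Any mismatch in Flags, MaxDevices, MaxBuses, ReplyFrameSize or the
+ * high DMA addresses causes the request to be rejected.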
+ */ + if (sizeof(dma_addr_t) == sizeof(u64)) { + high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32)); + sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32)); + } else { + high_addr = 0; + sense_high= 0; + } + + if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) || + (pInit->MaxBuses != ioc->facts.MaxBuses) || + (pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) || + (pInit->HostMfaHighAddr != high_addr) || + (pInit->SenseBufferHighAddr != sense_high)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n", + ioc->name, __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + } + break; + default: + /* + * MPI_FUNCTION_PORT_ENABLE + * MPI_FUNCTION_TARGET_CMD_BUFFER_POST + * MPI_FUNCTION_TARGET_ASSIST + * MPI_FUNCTION_TARGET_STATUS_SEND + * MPI_FUNCTION_TARGET_MODE_ABORT + * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET + * MPI_FUNCTION_IO_UNIT_RESET + * MPI_FUNCTION_HANDSHAKE + * MPI_FUNCTION_REPLY_FRAME_REMOVAL + * MPI_FUNCTION_EVENT_NOTIFICATION + * (driver handles event notification) + * MPI_FUNCTION_EVENT_ACK + */ + + /* What to do with these??? CHECK ME!!! + MPI_FUNCTION_FC_LINK_SRVC_BUF_POST + MPI_FUNCTION_FC_LINK_SRVC_RSP + MPI_FUNCTION_FC_ABORT + MPI_FUNCTION_LAN_SEND + MPI_FUNCTION_LAN_RECEIVE + MPI_FUNCTION_LAN_RESET + */ + + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "Illegal request (function 0x%x) \n", + ioc->name, __FILE__, __LINE__, hdr->Function); + rc = -EFAULT; + goto done_free_mem; + } + + /* Add the SGL ( at most one data in SGE and one data out SGE ) + * In the case of two SGE's - the data out (write) will always + * preceede the data in (read) SGE. psgList is used to free the + * allocated memory. + */ + psge = (char *) (((int *) mf) + karg.dataSgeOffset); + flagsLength = 0; + + if (karg.dataOutSize > 0) + sgSize ++; + + if (karg.dataInSize > 0) + sgSize ++; + + if (sgSize > 0) { + + /* Set up the dataOut memory allocation */ + if (karg.dataOutSize > 0) { + if (karg.dataInSize > 0) { + flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_DIRECTION) + << MPI_SGE_FLAGS_SHIFT; + } else { + flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; + } + flagsLength |= karg.dataOutSize; + bufOut.len = karg.dataOutSize; + bufOut.kptr = pci_alloc_consistent( + ioc->pcidev, bufOut.len, &dma_addr_out); + + if (bufOut.kptr == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } else { + /* Set up this SGE. + * Copy to MF and to sglbuf + */ + ioc->add_sge(psge, flagsLength, dma_addr_out); + psge += ioc->SGE_size; + + /* Copy user data to kernel space. 
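+ * The destination is the DMA-coherent buffer allocated just above and
+ * already linked into the request's SGE.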
+ */ + if (copy_from_user(bufOut.kptr, + karg.dataOutBufPtr, + bufOut.len)) { + printk(MYIOC_s_ERR_FMT + "%s@%d::mptctl_do_mpt_command - Unable " + "to read user data " + "struct @ %p\n", + ioc->name, __FILE__, __LINE__,karg.dataOutBufPtr); + rc = -EFAULT; + goto done_free_mem; + } + } + } + + if (karg.dataInSize > 0) { + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; + flagsLength |= karg.dataInSize; + + bufIn.len = karg.dataInSize; + bufIn.kptr = pci_alloc_consistent(ioc->pcidev, + bufIn.len, &dma_addr_in); + + if (bufIn.kptr == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } else { + /* Set up this SGE + * Copy to MF and to sglbuf + */ + ioc->add_sge(psge, flagsLength, dma_addr_in); + } + } + } else { + /* Add a NULL SGE + */ + ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); + } + + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext); + INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) + if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { + + mutex_lock(&ioc->taskmgmt_cmds.mutex); + if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + goto done_free_mem; + } + + DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); + + if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && + (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) + mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf); + else { + rc =mpt_send_handshake_request(mptctl_id, ioc, + sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); + if (rc != 0) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT + "send_handshake FAILED! (ioc %p, mf %p)\n", + ioc->name, ioc, mf)); + mpt_clear_taskmgmt_in_progress_flag(ioc); + rc = -ENODATA; + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + goto done_free_mem; + } + } + + } else + mpt_put_msg_frame(mptctl_id, ioc, mf); + + /* Now wait for the command to complete */ + timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; +retry_wait: + timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, + HZ*timeout); + if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { + rc = -ETIME; + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n", + ioc->name, __func__)); + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { + if (function == MPI_FUNCTION_SCSI_TASK_MGMT) + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + goto done_free_mem; + } + if (!timeleft) { + printk(MYIOC_s_WARN_FMT + "mpt cmd timeout, doorbell=0x%08x" + " function=0x%x\n", + ioc->name, mpt_GetIocState(ioc, 0), function); + if (function == MPI_FUNCTION_SCSI_TASK_MGMT) + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + mptctl_timeout_expired(ioc, mf); + mf = NULL; + } else + goto retry_wait; + goto done_free_mem; + } + + if (function == MPI_FUNCTION_SCSI_TASK_MGMT) + mutex_unlock(&ioc->taskmgmt_cmds.mutex); + + + mf = NULL; + + /* If a valid reply frame, copy to the user. + * Offset 2: reply length in U32's + */ + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { + if (karg.maxReplyBytes < ioc->reply_sz) { + sz = min(karg.maxReplyBytes, + 4*ioc->ioctl_cmds.reply[2]); + } else { + sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]); + } + if (sz > 0) { + if (copy_to_user(karg.replyFrameBufPtr, + ioc->ioctl_cmds.reply, sz)){ + printk(MYIOC_s_ERR_FMT + "%s@%d::mptctl_do_mpt_command - " + "Unable to write out reply frame %p\n", + ioc->name, __FILE__, __LINE__, karg.replyFrameBufPtr); + rc = -ENODATA; + goto done_free_mem; + } + } + } + + /* If valid sense data, copy to user. 
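+ * At most min(maxSenseBytes, MPT_SENSE_BUFFER_SIZE) bytes are copied.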
+ */ + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) { + sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); + if (sz > 0) { + if (copy_to_user(karg.senseDataPtr, + ioc->ioctl_cmds.sense, sz)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "Unable to write sense data to user %p\n", + ioc->name, __FILE__, __LINE__, + karg.senseDataPtr); + rc = -ENODATA; + goto done_free_mem; + } + } + } + + /* If the overall status is _GOOD and data in, copy data + * to user. + */ + if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) && + (karg.dataInSize > 0) && (bufIn.kptr)) { + + if (copy_to_user(karg.dataInBufPtr, + bufIn.kptr, karg.dataInSize)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "Unable to write data to user %p\n", + ioc->name, __FILE__, __LINE__, + karg.dataInBufPtr); + rc = -ENODATA; + } + } + +done_free_mem: + + CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); + + /* Free the allocated memory. + */ + if (bufOut.kptr != NULL) { + pci_free_consistent(ioc->pcidev, + bufOut.len, (void *) bufOut.kptr, dma_addr_out); + } + + if (bufIn.kptr != NULL) { + pci_free_consistent(ioc->pcidev, + bufIn.len, (void *) bufIn.kptr, dma_addr_in); + } + + /* mf is null if command issued successfully + * otherwise, failure occurred after mf acquired. + */ + if (mf) + mpt_free_msg_frame(ioc, mf); + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Prototype Routine for the HOST INFO command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -EBUSY if previous command timeout and IOC reset is not complete. + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) +{ + hp_host_info_t __user *uarg = (void __user *) arg; + MPT_ADAPTER *ioc; + struct pci_dev *pdev; + char *pbuf=NULL; + dma_addr_t buf_dma; + hp_host_info_t karg; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + int iocnum; + int rc, cim_rev; + ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; + MPT_FRAME_HDR *mf = NULL; + unsigned long timeleft; + int retval; + u32 msgcontext; + + /* Reset long to int. Should affect IA64 and SPARC only + */ + if (data_size == sizeof(hp_host_info_t)) + cim_rev = 1; + else if (data_size == sizeof(hp_host_info_rev0_t)) + cim_rev = 0; /* obsolete */ + else + return -EFAULT; + + if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_host_info - " + "Unable to read in hp_host_info struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", + ioc->name)); + + /* Fill in the data and return the structure to the calling + * program + */ + pdev = (struct pci_dev *) ioc->pcidev; + + karg.vendor = pdev->vendor; + karg.device = pdev->device; + karg.subsystem_id = pdev->subsystem_device; + karg.subsystem_vendor = pdev->subsystem_vendor; + karg.devfn = pdev->devfn; + karg.bus = pdev->bus->number; + + /* Save the SCSI host no. 
if + * SCSI driver loaded + */ + if (ioc->sh != NULL) + karg.host_no = ioc->sh->host_no; + else + karg.host_no = -1; + + /* Reformat the fw_version into a string */ + snprintf(karg.fw_version, sizeof(karg.fw_version), + "%.2hhu.%.2hhu.%.2hhu.%.2hhu", + ioc->facts.FWVersion.Struct.Major, + ioc->facts.FWVersion.Struct.Minor, + ioc->facts.FWVersion.Struct.Unit, + ioc->facts.FWVersion.Struct.Dev); + + /* Issue a config request to get the device serial number + */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; + cfg.cfghdr.hdr = &hdr; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; /* read */ + cfg.timeout = 10; + + strncpy(karg.serial_number, " ", 24); + if (mpt_config(ioc, &cfg) == 0) { + if (cfg.cfghdr.hdr->PageLength > 0) { + /* Issue the second config page request */ + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) == 0) { + ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; + if (strlen(pdata->BoardTracerNumber) > 1) { + strncpy(karg.serial_number, pdata->BoardTracerNumber, 24); + karg.serial_number[24-1]='\0'; + } + } + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; + } + } + } + rc = mpt_GetIocState(ioc, 1); + switch (rc) { + case MPI_IOC_STATE_OPERATIONAL: + karg.ioc_status = HP_STATUS_OK; + break; + + case MPI_IOC_STATE_FAULT: + karg.ioc_status = HP_STATUS_FAILED; + break; + + case MPI_IOC_STATE_RESET: + case MPI_IOC_STATE_READY: + default: + karg.ioc_status = HP_STATUS_OTHER; + break; + } + + karg.base_io_addr = pci_resource_start(pdev, 0); + + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) + karg.bus_phys_width = HP_BUS_WIDTH_UNK; + else + karg.bus_phys_width = HP_BUS_WIDTH_16; + + karg.hard_resets = 0; + karg.soft_resets = 0; + karg.timeouts = 0; + if (ioc->sh != NULL) { + MPT_SCSI_HOST *hd = shost_priv(ioc->sh); + + if (hd && (cim_rev == 1)) { + karg.hard_resets = ioc->hard_resets; + karg.soft_resets = ioc->soft_resets; + karg.timeouts = ioc->timeouts; + } + } + + /* + * Gather ISTWI(Industry Standard Two Wire Interface) Data + */ + if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT + "%s, no msg frames!!\n", ioc->name, __func__)); + goto out; + } + + IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; + msgcontext = IstwiRWRequest->MsgContext; + memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); + IstwiRWRequest->MsgContext = msgcontext; + IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; + IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; + IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; + IstwiRWRequest->NumAddressBytes = 0x01; + IstwiRWRequest->DataLength = cpu_to_le16(0x04); + if (pdev->devfn & 1) + IstwiRWRequest->DeviceAddr = 0xB2; + else + IstwiRWRequest->DeviceAddr = 0xB0; + + pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); + if (!pbuf) + goto out; + ioc->add_sge((char *)&IstwiRWRequest->SGL, + (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); + + retval = 0; + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, + IstwiRWRequest->MsgContext); + INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) + mpt_put_msg_frame(mptctl_id, ioc, mf); + +retry_wait: + timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, + HZ*MPT_IOCTL_DEFAULT_TIMEOUT); + if (!(ioc->ioctl_cmds.status & 
MPT_MGMT_STATUS_COMMAND_GOOD)) { + retval = -ETIME; + printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__); + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { + mpt_free_msg_frame(ioc, mf); + goto out; + } + if (!timeleft) { + printk(MYIOC_s_WARN_FMT + "HOST INFO command timeout, doorbell=0x%08x\n", + ioc->name, mpt_GetIocState(ioc, 0)); + mptctl_timeout_expired(ioc, mf); + } else + goto retry_wait; + goto out; + } + + /* + *ISTWI Data Definition + * pbuf[0] = FW_VERSION = 0x4 + * pbuf[1] = Bay Count = 6 or 4 or 2, depending on + * the config, you should be seeing one out of these three values + * pbuf[2] = Drive Installed Map = bit pattern depend on which + * bays have drives in them + * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) + */ + if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) + karg.rsvd = *(u32 *)pbuf; + + out: + CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) + SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); + + if (pbuf) + pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hpgethostinfo - " + "Unable to write out hp_host_info @ %p\n", + ioc->name, __FILE__, __LINE__, uarg); + return -EFAULT; + } + + return 0; + +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Prototype Routine for the TARGET INFO command. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -EBUSY if previous command timeout and IOC reset is not complete. + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_hp_targetinfo(unsigned long arg) +{ + hp_target_info_t __user *uarg = (void __user *) arg; + SCSIDevicePage0_t *pg0_alloc; + SCSIDevicePage3_t *pg3_alloc; + MPT_ADAPTER *ioc; + MPT_SCSI_HOST *hd = NULL; + hp_target_info_t karg; + int iocnum; + int data_sz; + dma_addr_t page_dma; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + int tmp, np, rc = 0; + + if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_targetinfo - " + "Unable to read in hp_host_targetinfo struct @ %p\n", + __FILE__, __LINE__, uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + if (karg.hdr.id >= MPT_MAX_FC_DEVICES) + return -EINVAL; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", + ioc->name)); + + /* There is nothing to do for FCP parts. 
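+ * SAS and FC adapters return immediately; the negotiation and error
+ * counters below exist only for parallel SCSI.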
+ */ + if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) + return 0; + + if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) + return 0; + + if (ioc->sh->host_no != karg.hdr.host) + return -ENODEV; + + /* Get the data transfer speeds + */ + data_sz = ioc->spi_data.sdp0length * 4; + pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); + if (pg0_alloc) { + hdr.PageVersion = ioc->spi_data.sdp0version; + hdr.PageLength = data_sz; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + + cfg.cfghdr.hdr = &hdr; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.dir = 0; + cfg.timeout = 0; + cfg.physAddr = page_dma; + + cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + np = le32_to_cpu(pg0_alloc->NegotiatedParameters); + karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ? + HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8; + + if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) { + tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8; + if (tmp < 0x09) + karg.negotiated_speed = HP_DEV_SPEED_ULTRA320; + else if (tmp <= 0x09) + karg.negotiated_speed = HP_DEV_SPEED_ULTRA160; + else if (tmp <= 0x0A) + karg.negotiated_speed = HP_DEV_SPEED_ULTRA2; + else if (tmp <= 0x0C) + karg.negotiated_speed = HP_DEV_SPEED_ULTRA; + else if (tmp <= 0x25) + karg.negotiated_speed = HP_DEV_SPEED_FAST; + else + karg.negotiated_speed = HP_DEV_SPEED_ASYNC; + } else + karg.negotiated_speed = HP_DEV_SPEED_ASYNC; + } + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma); + } + + /* Set defaults + */ + karg.message_rejects = -1; + karg.phase_errors = -1; + karg.parity_errors = -1; + karg.select_timeouts = -1; + + /* Get the target error parameters + */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 3; + hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + + cfg.cfghdr.hdr = &hdr; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; + cfg.physAddr = -1; + if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) { + /* Issue the second config page request */ + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + data_sz = (int) cfg.cfghdr.hdr->PageLength * 4; + pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent( + ioc->pcidev, data_sz, &page_dma); + if (pg3_alloc) { + cfg.physAddr = page_dma; + cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id; + if ((rc = mpt_config(ioc, &cfg)) == 0) { + karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount); + karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount); + karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount); + } + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma); + } + } + hd = shost_priv(ioc->sh); + if (hd != NULL) + karg.select_timeouts = hd->sel_timeout[karg.hdr.id]; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hp_target_info - " + "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", + ioc->name, __FILE__, __LINE__, uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +static const struct file_operations mptctl_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .fasync = mptctl_fasync, + .unlocked_ioctl = mptctl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_mpctl_ioctl, +#endif +}; + +static struct 
miscdevice mptctl_miscdev = { + MPT_MINOR, + MYNAM, + &mptctl_fops +}; + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#ifdef CONFIG_COMPAT + +static int +compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct mpt_fw_xfer32 kfw32; + struct mpt_fw_xfer kfw; + MPT_ADAPTER *iocp = NULL; + int iocnum, iocnumX; + int nonblock = (filp->f_flags & O_NONBLOCK); + int ret; + + + if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32))) + return -EFAULT; + + /* Verify intended MPT adapter */ + iocnumX = kfw32.iocnum & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || + (iocp == NULL)) { + printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n", + __LINE__, iocnumX); + return -ENODEV; + } + + if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) + return ret; + + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n", + iocp->name)); + kfw.iocnum = iocnum; + kfw.fwlen = kfw32.fwlen; + kfw.bufp = compat_ptr(kfw32.bufp); + + ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + + mutex_unlock(&iocp->ioctl_cmds.mutex); + + return ret; +} + +static int +compat_mpt_command(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct mpt_ioctl_command32 karg32; + struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg; + struct mpt_ioctl_command karg; + MPT_ADAPTER *iocp = NULL; + int iocnum, iocnumX; + int nonblock = (filp->f_flags & O_NONBLOCK); + int ret; + + if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) + return -EFAULT; + + /* Verify intended MPT adapter */ + iocnumX = karg32.hdr.iocnum & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || + (iocp == NULL)) { + printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", + __LINE__, iocnumX); + return -ENODEV; + } + + if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) + return ret; + + dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n", + iocp->name)); + /* Copy data to karg */ + karg.hdr.iocnum = karg32.hdr.iocnum; + karg.hdr.port = karg32.hdr.port; + karg.timeout = karg32.timeout; + karg.maxReplyBytes = karg32.maxReplyBytes; + + karg.dataInSize = karg32.dataInSize; + karg.dataOutSize = karg32.dataOutSize; + karg.maxSenseBytes = karg32.maxSenseBytes; + karg.dataSgeOffset = karg32.dataSgeOffset; + + karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr; + karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr; + karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr; + karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr; + + /* Pass new structure to do_mpt_command + */ + ret = mptctl_do_mpt_command (karg, &uarg->MF); + + mutex_unlock(&iocp->ioctl_cmds.mutex); + + return ret; +} + +static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + long ret; + mutex_lock(&mpctl_mutex); + switch (cmd) { + case MPTIOCINFO: + case MPTIOCINFO1: + case MPTIOCINFO2: + case MPTTARGETINFO: + case MPTEVENTQUERY: + case MPTEVENTENABLE: + case MPTEVENTREPORT: + case MPTHARDRESET: + case HP_GETHOSTINFO: + case HP_GETTARGETINFO: + case MPTTEST: + ret = __mptctl_ioctl(f, cmd, arg); + break; + case MPTCOMMAND32: + ret = compat_mpt_command(f, cmd, arg); + break; + case MPTFWDOWNLOAD32: + ret = compat_mptfwxfer_ioctl(f, cmd, arg); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + mutex_unlock(&mpctl_mutex); + 
return ret; +} + +#endif + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_probe - Installs ioctl devices per bus. + * @pdev: Pointer to pci_dev structure + * + * Returns 0 for success, non-zero for failure. + * + */ + +static int +mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + + mutex_init(&ioc->ioctl_cmds.mutex); + init_completion(&ioc->ioctl_cmds.done); + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_remove - Removed ioctl devices + * @pdev: Pointer to pci_dev structure + * + * + */ +static void +mptctl_remove(struct pci_dev *pdev) +{ +} + +static struct mpt_pci_driver mptctl_driver = { + .probe = mptctl_probe, + .remove = mptctl_remove, +}; + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int __init mptctl_init(void) +{ + int err; + int where = 1; + + show_mptmod_ver(my_NAME, my_VERSION); + + mpt_device_driver_register(&mptctl_driver, MPTCTL_DRIVER); + + /* Register this device */ + err = misc_register(&mptctl_miscdev); + if (err < 0) { + printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR); + goto out_fail; + } + printk(KERN_INFO MYNAM ": Registered with Fusion MPT base driver\n"); + printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n", + mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor); + + /* + * Install our handler + */ + ++where; + mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER, + "mptctl_reply"); + if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) { + printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); + misc_deregister(&mptctl_miscdev); + err = -EBUSY; + goto out_fail; + } + + mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER, + "mptctl_taskmgmt_reply"); + if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) { + printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); + mpt_deregister(mptctl_id); + misc_deregister(&mptctl_miscdev); + err = -EBUSY; + goto out_fail; + } + + mpt_reset_register(mptctl_id, mptctl_ioc_reset); + mpt_event_register(mptctl_id, mptctl_event_process); + + return 0; + +out_fail: + + mpt_device_driver_deregister(MPTCTL_DRIVER); + + return err; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static void mptctl_exit(void) +{ + misc_deregister(&mptctl_miscdev); + printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n", + mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor); + + /* De-register event handler from base module */ + mpt_event_deregister(mptctl_id); + + /* De-register reset handler from base module */ + mpt_reset_deregister(mptctl_id); + + /* De-register callback handler from base module */ + mpt_deregister(mptctl_taskmgmt_id); + mpt_deregister(mptctl_id); + + mpt_device_driver_deregister(MPTCTL_DRIVER); + +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +module_init(mptctl_init); +module_exit(mptctl_exit); diff --git a/mptsas-kmod/el8/mptctl.h b/mptsas-kmod/el8/mptctl.h new file mode 100644 index 00000000..d564cc9a --- /dev/null +++ b/mptsas-kmod/el8/mptctl.h @@ -0,0 +1,467 @@ +/* + * linux/drivers/message/fusion/mptioctl.h + * Fusion MPT misc device (ioctl) driver. 
+ * For use with PCI chip/adapter(s): + * LSIFC9xx/LSI409xx Fibre Channel + * running LSI Fusion MPT (Message Passing Technology) firmware. + * + * Copyright (c) 1999-2008 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef MPTCTL_H_INCLUDED +#define MPTCTL_H_INCLUDED +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * + */ +#define MPT_MISCDEV_BASENAME "mptctl" +#define MPT_MISCDEV_PATHNAME "/dev/" MPT_MISCDEV_BASENAME + +#define MPT_PRODUCT_LENGTH 12 + +/* + * Generic MPT Control IOCTLs and structures + */ +#define MPT_MAGIC_NUMBER 'm' + +#define MPTRWPERF _IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w) + +#define MPTFWDOWNLOAD _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer) +#define MPTCOMMAND _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command) + +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +#define MPTFWDOWNLOAD32 _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32) +#define MPTCOMMAND32 _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command32) +#endif + +#define MPTIOCINFO _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo) +#define MPTIOCINFO1 _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev0) +#define MPTIOCINFO2 _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev1) +#define MPTTARGETINFO _IOWR(MPT_MAGIC_NUMBER,18,struct mpt_ioctl_targetinfo) +#define MPTTEST _IOWR(MPT_MAGIC_NUMBER,19,struct mpt_ioctl_test) +#define MPTEVENTQUERY _IOWR(MPT_MAGIC_NUMBER,21,struct mpt_ioctl_eventquery) +#define MPTEVENTENABLE _IOWR(MPT_MAGIC_NUMBER,22,struct 
mpt_ioctl_eventenable) +#define MPTEVENTREPORT _IOWR(MPT_MAGIC_NUMBER,23,struct mpt_ioctl_eventreport) +#define MPTHARDRESET _IOWR(MPT_MAGIC_NUMBER,24,struct mpt_ioctl_diag_reset) +#define MPTFWREPLACE _IOWR(MPT_MAGIC_NUMBER,25,struct mpt_ioctl_replace_fw) + +/* + * SPARC PLATFORM REMARKS: + * IOCTL data structures that contain pointers + * will have different sizes in the driver and applications + * (as the app. will not use 8-byte pointers). + * Apps should use MPTFWDOWNLOAD and MPTCOMMAND. + * The driver will convert data from + * mpt_fw_xfer32 (mpt_ioctl_command32) to mpt_fw_xfer (mpt_ioctl_command) + * internally. + * + * If data structures change size, must handle as in IOCGETINFO. + */ +struct mpt_fw_xfer { + unsigned int iocnum; /* IOC unit number */ + unsigned int fwlen; + void __user *bufp; /* Pointer to firmware buffer */ +}; + +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +struct mpt_fw_xfer32 { + unsigned int iocnum; + unsigned int fwlen; + u32 bufp; +}; +#endif /*}*/ + +/* + * IOCTL header structure. + * iocnum - must be defined. + * port - must be defined for all IOCTL commands other than MPTIOCINFO + * maxDataSize - ignored on MPTCOMMAND commands + * - ignored on MPTFWREPLACE commands + * - on query commands, reports the maximum number of bytes to be returned + * to the host driver (count includes the header). + * That is, set to sizeof(struct mpt_ioctl_iocinfo) for fixed sized commands. + * Set to sizeof(struct mpt_ioctl_targetinfo) + datasize for variable + * sized commands. (MPTTARGETINFO, MPTEVENTREPORT) + */ +typedef struct _mpt_ioctl_header { + unsigned int iocnum; /* IOC unit number */ + unsigned int port; /* IOC port number */ + int maxDataSize; /* Maximum Num. bytes to transfer on read */ +} mpt_ioctl_header; + +/* + * Issue a diagnostic reset + */ +struct mpt_ioctl_diag_reset { + mpt_ioctl_header hdr; +}; + + +/* + * PCI bus/device/function information structure. + */ +struct mpt_ioctl_pci_info { + union { + struct { + unsigned int deviceNumber : 5; + unsigned int functionNumber : 3; + unsigned int busNumber : 24; + } bits; + unsigned int asUlong; + } u; +}; + +struct mpt_ioctl_pci_info2 { + union { + struct { + unsigned int deviceNumber : 5; + unsigned int functionNumber : 3; + unsigned int busNumber : 24; + } bits; + unsigned int asUlong; + } u; + int segmentID; +}; + +/* + * Adapter Information Page + * Read only. + * Data starts at offset 0xC + */ +#define MPT_IOCTL_INTERFACE_SCSI (0x00) +#define MPT_IOCTL_INTERFACE_FC (0x01) +#define MPT_IOCTL_INTERFACE_FC_IP (0x02) +#define MPT_IOCTL_INTERFACE_SAS (0x03) +#define MPT_IOCTL_VERSION_LENGTH (32) + +struct mpt_ioctl_iocinfo { + mpt_ioctl_header hdr; + int adapterType; /* SCSI or FCP */ + int port; /* port number */ + int pciId; /* PCI Id. */ + int hwRev; /* hardware revision */ + int subSystemDevice; /* PCI subsystem Device ID */ + int subSystemVendor; /* PCI subsystem Vendor ID */ + int numDevices; /* number of devices */ + int FWVersion; /* FW Version (integer) */ + int BIOSVersion; /* BIOS Version (integer) */ + char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */ + char busChangeEvent; + char hostId; + char rsvd[2]; + struct mpt_ioctl_pci_info2 pciInfo; /* Added Rev 2 */ +}; + +struct mpt_ioctl_iocinfo_rev1 { + mpt_ioctl_header hdr; + int adapterType; /* SCSI or FCP */ + int port; /* port number */ + int pciId; /* PCI Id. 
*/
+ int hwRev; /* hardware revision */
+ int subSystemDevice; /* PCI subsystem Device ID */
+ int subSystemVendor; /* PCI subsystem Vendor ID */
+ int numDevices; /* number of devices */
+ int FWVersion; /* FW Version (integer) */
+ int BIOSVersion; /* BIOS Version (integer) */
+ char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
+ char busChangeEvent;
+ char hostId;
+ char rsvd[2];
+ struct mpt_ioctl_pci_info pciInfo; /* Added Rev 1 */
+};
+
+/* Original structure, must always accept these
+ * IOCTLs. 4 byte pads can occur based on arch with
+ * above structure. Wish to re-align, but cannot.
+ */
+struct mpt_ioctl_iocinfo_rev0 {
+ mpt_ioctl_header hdr;
+ int adapterType; /* SCSI or FCP */
+ int port; /* port number */
+ int pciId; /* PCI Id. */
+ int hwRev; /* hardware revision */
+ int subSystemDevice; /* PCI subsystem Device ID */
+ int subSystemVendor; /* PCI subsystem Vendor ID */
+ int numDevices; /* number of devices */
+ int FWVersion; /* FW Version (integer) */
+ int BIOSVersion; /* BIOS Version (integer) */
+ char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
+ char busChangeEvent;
+ char hostId;
+ char rsvd[2];
+};
+
+/*
+ * Device Information Page
+ * Report the number of, and ids of, all targets
+ * on this IOC. The ids array is a packed structure
+ * of the known targetInfo.
+ * bits 31-24: reserved
+ * 23-16: LUN
+ * 15- 8: Bus Number
+ * 7- 0: Target ID
+ */
+struct mpt_ioctl_targetinfo {
+ mpt_ioctl_header hdr;
+ int numDevices; /* Num targets on this ioc */
+ int targetInfo[1];
+};
+
+
+/*
+ * Event reporting IOCTL's. These IOCTL's will
+ * use the following defines:
+ */
+struct mpt_ioctl_eventquery {
+ mpt_ioctl_header hdr;
+ unsigned short eventEntries;
+ unsigned short reserved;
+ unsigned int eventTypes;
+};
+
+struct mpt_ioctl_eventenable {
+ mpt_ioctl_header hdr;
+ unsigned int eventTypes;
+};
+
+#ifndef __KERNEL__
+typedef struct {
+ uint event;
+ uint eventContext;
+ uint data[2];
+} MPT_IOCTL_EVENTS;
+#endif
+
+struct mpt_ioctl_eventreport {
+ mpt_ioctl_header hdr;
+ MPT_IOCTL_EVENTS eventData[1];
+};
+
+#define MPT_MAX_NAME 32
+struct mpt_ioctl_test {
+ mpt_ioctl_header hdr;
+ u8 name[MPT_MAX_NAME];
+ int chip_type;
+ u8 product [MPT_PRODUCT_LENGTH];
+};
+
+/* Replace the FW image cached in host driver memory
+ * newImageSize - image size in bytes
+ * newImage - first byte of the new image
+ */
+typedef struct mpt_ioctl_replace_fw {
+ mpt_ioctl_header hdr;
+ int newImageSize;
+ u8 newImage[1];
+} mpt_ioctl_replace_fw_t;
+
+/* General MPT Pass through data structure
+ *
+ * iocnum
+ * timeout - in seconds, command timeout. If 0, set by driver to
+ * default value.
+ * replyFrameBufPtr - reply location
+ * dataInBufPtr - destination for read
+ * dataOutBufPtr - data source for write
+ * senseDataPtr - sense data location
+ * maxReplyBytes - maximum number of reply bytes to be sent to app.
+ * dataInSize - num bytes for data transfer in (read)
+ * dataOutSize - num bytes for data transfer out (write)
+ * dataSgeOffset - offset in words from the start of the request message
+ * to the first SGL
+ * MF[1];
+ *
+ * Remark: Some config pages have bi-directional transfer,
+ * both a read and a write. The basic structure allows for
+ * a bidirectional set up. Normal messages will have one or
+ * both of these buffers NULL.
+ */ +struct mpt_ioctl_command { + mpt_ioctl_header hdr; + int timeout; /* optional (seconds) */ + char __user *replyFrameBufPtr; + char __user *dataInBufPtr; + char __user *dataOutBufPtr; + char __user *senseDataPtr; + int maxReplyBytes; + int dataInSize; + int dataOutSize; + int maxSenseBytes; + int dataSgeOffset; + char MF[1]; +}; + +/* + * SPARC PLATFORM: See earlier remark. + */ +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +struct mpt_ioctl_command32 { + mpt_ioctl_header hdr; + int timeout; + u32 replyFrameBufPtr; + u32 dataInBufPtr; + u32 dataOutBufPtr; + u32 senseDataPtr; + int maxReplyBytes; + int dataInSize; + int dataOutSize; + int maxSenseBytes; + int dataSgeOffset; + char MF[1]; +}; +#endif /*}*/ + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#define CPQFCTS_IOC_MAGIC 'Z' +#define HP_IOC_MAGIC 'Z' +#define HP_GETHOSTINFO _IOR(HP_IOC_MAGIC, 20, hp_host_info_t) +#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t) +#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t) + +typedef struct _hp_header { + unsigned int iocnum; + unsigned int host; + unsigned int channel; + unsigned int id; + unsigned int lun; +} hp_header_t; + +/* + * Header: + * iocnum required (input) + * host ignored + * channe ignored + * id ignored + * lun ignored + */ +typedef struct _hp_host_info { + hp_header_t hdr; + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem_id; + u8 devfn; + u8 bus; + ushort host_no; /* SCSI Host number, if scsi driver not loaded*/ + u8 fw_version[16]; /* string */ + u8 serial_number[24]; /* string */ + u32 ioc_status; + u32 bus_phys_width; + u32 base_io_addr; + u32 rsvd; + unsigned int hard_resets; /* driver initiated resets */ + unsigned int soft_resets; /* ioc, external resets */ + unsigned int timeouts; /* num timeouts */ +} hp_host_info_t; + +/* replace ulongs with uints, need to preserve backwards + * compatibility. + */ +typedef struct _hp_host_info_rev0 { + hp_header_t hdr; + u16 vendor; + u16 device; + u16 subsystem_vendor; + u16 subsystem_id; + u8 devfn; + u8 bus; + ushort host_no; /* SCSI Host number, if scsi driver not loaded*/ + u8 fw_version[16]; /* string */ + u8 serial_number[24]; /* string */ + u32 ioc_status; + u32 bus_phys_width; + u32 base_io_addr; + u32 rsvd; + unsigned long hard_resets; /* driver initiated resets */ + unsigned long soft_resets; /* ioc, external resets */ + unsigned long timeouts; /* num timeouts */ +} hp_host_info_rev0_t; + +/* + * Header: + * iocnum required (input) + * host required + * channel required (bus number) + * id required + * lun ignored + * + * All error values between 0 and 0xFFFF in size. 
+ */ +typedef struct _hp_target_info { + hp_header_t hdr; + u32 parity_errors; + u32 phase_errors; + u32 select_timeouts; + u32 message_rejects; + u32 negotiated_speed; + u8 negotiated_width; + u8 rsvd[7]; /* 8 byte alignment */ +} hp_target_info_t; + +#define HP_STATUS_OTHER 1 +#define HP_STATUS_OK 2 +#define HP_STATUS_FAILED 3 + +#define HP_BUS_WIDTH_UNK 1 +#define HP_BUS_WIDTH_8 2 +#define HP_BUS_WIDTH_16 3 +#define HP_BUS_WIDTH_32 4 + +#define HP_DEV_SPEED_ASYNC 2 +#define HP_DEV_SPEED_FAST 3 +#define HP_DEV_SPEED_ULTRA 4 +#define HP_DEV_SPEED_ULTRA2 5 +#define HP_DEV_SPEED_ULTRA160 6 +#define HP_DEV_SPEED_SCSI1 7 +#define HP_DEV_SPEED_ULTRA320 8 + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#endif + diff --git a/mptsas-kmod/el8/mptfc.c b/mptsas-kmod/el8/mptfc.c new file mode 100644 index 00000000..06b17542 --- /dev/null +++ b/mptsas-kmod/el8/mptfc.c @@ -0,0 +1,1554 @@ +/* + * linux/drivers/message/fusion/mptfc.c + * For use with LSI PCI chip/adapter(s) + * running LSI Fusion MPT (Message Passing Technology) firmware. + * + * Copyright (c) 1999-2008 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. 
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#include
+#include
+#include
+#include
+#include
+#include
+#include /* for mdelay */
+#include /* needed for in_interrupt() proto */
+#include /* notifier code */
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "mptbase.h"
+#include "mptscsih.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME "Fusion MPT FC Host driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptfc"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+/* Command line args */
+#define MPTFC_DEV_LOSS_TMO (60)
+static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
+module_param(mptfc_dev_loss_tmo, int, 0);
+MODULE_PARM_DESC(mptfc_dev_loss_tmo, " Initial time the driver programs the "
+ " transport to wait for an rport to "
+ " return following a device loss event."
+ " Default=60.");
+
+/* scsi mid-layer global parameter is max_report_luns, which is 511 */
+#define MPTFC_MAX_LUN (16895)
+static int max_lun = MPTFC_MAX_LUN;
+module_param(max_lun, int, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+static u8 mptfcDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptfcTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS;
+
+static int mptfc_target_alloc(struct scsi_target *starget);
+static int mptfc_slave_alloc(struct scsi_device *sdev);
+static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
+static void mptfc_target_destroy(struct scsi_target *starget);
+static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
+static void mptfc_remove(struct pci_dev *pdev);
+static int mptfc_abort(struct scsi_cmnd *SCpnt);
+static int mptfc_dev_reset(struct scsi_cmnd *SCpnt);
+static int mptfc_bus_reset(struct scsi_cmnd *SCpnt);
+
+static struct scsi_host_template mptfc_driver_template = {
+ .module = THIS_MODULE,
+ .proc_name = "mptfc",
+ .show_info = mptscsih_show_info,
+ .name = "MPT FC Host",
+ .info = mptscsih_info,
+ .queuecommand = mptfc_qcmd,
+ .target_alloc = mptfc_target_alloc,
+ .slave_alloc = mptfc_slave_alloc,
+ .slave_configure = mptscsih_slave_configure,
+ .target_destroy = mptfc_target_destroy,
+ .slave_destroy = mptscsih_slave_destroy,
+ .change_queue_depth = mptscsih_change_queue_depth,
+ .eh_timed_out = fc_eh_timed_out,
+ .eh_abort_handler = mptfc_abort,
+ .eh_device_reset_handler = mptfc_dev_reset,
+ .eh_bus_reset_handler = mptfc_bus_reset,
+ .eh_host_reset_handler = mptscsih_host_reset,
+ .bios_param = mptscsih_bios_param,
+ .can_queue = MPT_FC_CAN_QUEUE,
+ .this_id = -1,
+ .sg_tablesize = MPT_SCSI_SG_DEPTH,
+ 
.max_sectors = 8192, + .cmd_per_lun = 7, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = mptscsih_host_attrs, +}; + +/**************************************************************************** + * Supported hardware + */ + +static struct pci_device_id mptfc_pci_table[] = { + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC909, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919X, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929X, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC939X, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949X, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E, + PCI_ANY_ID, PCI_ANY_ID }, + {0} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, mptfc_pci_table); + +static struct scsi_transport_template *mptfc_transport_template = NULL; + +static struct fc_function_template mptfc_transport_functions = { + .dd_fcrport_size = 8, + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_port_id = 1, + .show_rport_supported_classes = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .show_host_supported_speeds = 1, + .show_host_maxframe_size = 1, + .show_host_speed = 1, + .show_host_fabric_name = 1, + .show_host_port_type = 1, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, +}; + +static int +mptfc_block_error_handler(struct scsi_cmnd *SCpnt, + int (*func)(struct scsi_cmnd *SCpnt), + const char *caller) +{ + MPT_SCSI_HOST *hd; + struct scsi_device *sdev = SCpnt->device; + struct Scsi_Host *shost = sdev->host; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + unsigned long flags; + int ready; + MPT_ADAPTER *ioc; + int loops = 40; /* seconds */ + + hd = shost_priv(SCpnt->device->host); + ioc = hd->ioc; + spin_lock_irqsave(shost->host_lock, flags); + while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY + || (loops > 0 && ioc->active == 0)) { + spin_unlock_irqrestore(shost->host_lock, flags); + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT + "mptfc_block_error_handler.%d: %d:%llu, port status is " + "%x, active flag %d, deferring %s recovery.\n", + ioc->name, ioc->sh->host_no, + SCpnt->device->id, SCpnt->device->lun, + ready, ioc->active, caller)); + msleep(1000); + spin_lock_irqsave(shost->host_lock, flags); + loops --; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata + || ioc->active == 0) { + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT + "%s.%d: %d:%llu, failing recovery, " + "port state %x, active %d, vdevice %p.\n", caller, + ioc->name, ioc->sh->host_no, + SCpnt->device->id, SCpnt->device->lun, ready, + ioc->active, SCpnt->device->hostdata)); + return FAILED; + } + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT + "%s.%d: %d:%llu, executing recovery.\n", caller, + ioc->name, ioc->sh->host_no, + SCpnt->device->id, SCpnt->device->lun)); + return (*func)(SCpnt); +} + +static int +mptfc_abort(struct scsi_cmnd *SCpnt) +{ + return + 
mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__); +} + +static int +mptfc_dev_reset(struct scsi_cmnd *SCpnt) +{ + return + mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__); +} + +static int +mptfc_bus_reset(struct scsi_cmnd *SCpnt) +{ + return + mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__); +} + +static void +mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) +{ + if (timeout > 0) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = mptfc_dev_loss_tmo; +} + +static int +mptfc_FcDevPage0_cmp_func(const void *a, const void *b) +{ + FCDevicePage0_t **aa = (FCDevicePage0_t **)a; + FCDevicePage0_t **bb = (FCDevicePage0_t **)b; + + if ((*aa)->CurrentBus == (*bb)->CurrentBus) { + if ((*aa)->CurrentTargetID == (*bb)->CurrentTargetID) + return 0; + if ((*aa)->CurrentTargetID < (*bb)->CurrentTargetID) + return -1; + return 1; + } + if ((*aa)->CurrentBus < (*bb)->CurrentBus) + return -1; + return 1; +} + +static int +mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port, + void(*func)(MPT_ADAPTER *ioc,int channel, FCDevicePage0_t *arg)) +{ + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + FCDevicePage0_t *ppage0_alloc, *fc; + dma_addr_t page0_dma; + int data_sz; + int ii; + + FCDevicePage0_t *p0_array=NULL, *p_p0; + FCDevicePage0_t **pp0_array=NULL, **p_pp0; + + int rc = -ENOMEM; + U32 port_id = 0xffffff; + int num_targ = 0; + int max_bus = ioc->facts.MaxBuses; + int max_targ; + + max_targ = (ioc->facts.MaxDevices == 0) ? 256 : ioc->facts.MaxDevices; + + data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ; + p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL); + if (!p0_array) + goto out; + + data_sz = sizeof(FCDevicePage0_t *) * max_bus * max_targ; + p_pp0 = pp0_array = kzalloc(data_sz, GFP_KERNEL); + if (!pp0_array) + goto out; + + do { + /* Get FC Device Page 0 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_FC_DEVICE; + cfg.cfghdr.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = port_id; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + break; + + if (hdr.PageLength <= 0) + break; + + data_sz = hdr.PageLength * 4; + ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, + &page0_dma); + rc = -ENOMEM; + if (!ppage0_alloc) + break; + + cfg.physAddr = page0_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + ppage0_alloc->PortIdentifier = + le32_to_cpu(ppage0_alloc->PortIdentifier); + + ppage0_alloc->WWNN.Low = + le32_to_cpu(ppage0_alloc->WWNN.Low); + + ppage0_alloc->WWNN.High = + le32_to_cpu(ppage0_alloc->WWNN.High); + + ppage0_alloc->WWPN.Low = + le32_to_cpu(ppage0_alloc->WWPN.Low); + + ppage0_alloc->WWPN.High = + le32_to_cpu(ppage0_alloc->WWPN.High); + + ppage0_alloc->BBCredit = + le16_to_cpu(ppage0_alloc->BBCredit); + + ppage0_alloc->MaxRxFrameSize = + le16_to_cpu(ppage0_alloc->MaxRxFrameSize); + + port_id = ppage0_alloc->PortIdentifier; + num_targ++; + *p_p0 = *ppage0_alloc; /* save data */ + *p_pp0++ = p_p0++; /* save addr */ + } + pci_free_consistent(ioc->pcidev, data_sz, + (u8 *) ppage0_alloc, page0_dma); + if (rc != 0) + break; + + } while (port_id <= 0xff0000); + + if (num_targ) { + /* sort array */ + if (num_targ > 1) + sort (pp0_array, num_targ, sizeof(FCDevicePage0_t *), + mptfc_FcDevPage0_cmp_func, NULL); + /* call caller's func for each targ */ + for (ii = 0; ii < num_targ; ii++) { + fc = *(pp0_array+ii); + func(ioc, ioc_port, fc); 
+ } + } + + out: + kfree(pp0_array); + kfree(p0_array); + return rc; +} + +static int +mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid) +{ + /* not currently usable */ + if (pg0->Flags & (MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID | + MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID)) + return -1; + + if (!(pg0->Flags & MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID)) + return -1; + + if (!(pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET)) + return -1; + + /* + * board data structure already normalized to platform endianness + * shifted to avoid unaligned access on 64 bit architecture + */ + rid->node_name = ((u64)pg0->WWNN.High) << 32 | (u64)pg0->WWNN.Low; + rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low; + rid->port_id = pg0->PortIdentifier; + rid->roles = FC_RPORT_ROLE_UNKNOWN; + + return 0; +} + +static void +mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) +{ + struct fc_rport_identifiers rport_ids; + struct fc_rport *rport; + struct mptfc_rport_info *ri; + int new_ri = 1; + u64 pn, nn; + VirtTarget *vtarget; + u32 roles = FC_RPORT_ROLE_UNKNOWN; + + if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0) + return; + + roles |= FC_RPORT_ROLE_FCP_TARGET; + if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR) + roles |= FC_RPORT_ROLE_FCP_INITIATOR; + + /* scan list looking for a match */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; + if (pn == rport_ids.port_name) { /* match */ + list_move_tail(&ri->list, &ioc->fc_rports); + new_ri = 0; + break; + } + } + if (new_ri) { /* allocate one */ + ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL); + if (!ri) + return; + list_add_tail(&ri->list, &ioc->fc_rports); + } + + ri->pg0 = *pg0; /* add/update pg0 data */ + ri->flags &= ~MPT_RPORT_INFO_FLAGS_MISSING; + + /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */ + if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) { + ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED; + rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); + if (rport) { + ri->rport = rport; + if (new_ri) /* may have been reset by user */ + rport->dev_loss_tmo = mptfc_dev_loss_tmo; + /* + * if already mapped, remap here. If not mapped, + * target_alloc will allocate vtarget and map, + * slave_alloc will fill in vdevice from vtarget. 
+ */ + if (ri->starget) { + vtarget = ri->starget->hostdata; + if (vtarget) { + vtarget->id = pg0->CurrentTargetID; + vtarget->channel = pg0->CurrentBus; + vtarget->deleted = 0; + } + } + *((struct mptfc_rport_info **)rport->dd_data) = ri; + /* scan will be scheduled once rport becomes a target */ + fc_remote_port_rolechg(rport,roles); + + pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; + nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT + "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, " + "rport tid %d, tmo %d\n", + ioc->name, + ioc->sh->host_no, + pg0->PortIdentifier, + (unsigned long long)nn, + (unsigned long long)pn, + pg0->CurrentTargetID, + ri->rport->scsi_target_id, + ri->rport->dev_loss_tmo)); + } else { + list_del(&ri->list); + kfree(ri); + ri = NULL; + } + } +} + +/* + * OS entry point to allow for host driver to free allocated memory + * Called if no device present or device being unloaded + */ +static void +mptfc_target_destroy(struct scsi_target *starget) +{ + struct fc_rport *rport; + struct mptfc_rport_info *ri; + + rport = starget_to_rport(starget); + if (rport) { + ri = *((struct mptfc_rport_info **)rport->dd_data); + if (ri) /* better be! */ + ri->starget = NULL; + } + kfree(starget->hostdata); + starget->hostdata = NULL; +} + +/* + * OS entry point to allow host driver to alloc memory + * for each scsi target. Called once per device the bus scan. + * Return non-zero if allocation fails. + */ +static int +mptfc_target_alloc(struct scsi_target *starget) +{ + VirtTarget *vtarget; + struct fc_rport *rport; + struct mptfc_rport_info *ri; + int rc; + + vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL); + if (!vtarget) + return -ENOMEM; + starget->hostdata = vtarget; + + rc = -ENODEV; + rport = starget_to_rport(starget); + if (rport) { + ri = *((struct mptfc_rport_info **)rport->dd_data); + if (ri) { /* better be! */ + vtarget->id = ri->pg0.CurrentTargetID; + vtarget->channel = ri->pg0.CurrentBus; + ri->starget = starget; + rc = 0; + } + } + if (rc != 0) { + kfree(vtarget); + starget->hostdata = NULL; + } + + return rc; +} +/* + * mptfc_dump_lun_info + * @ioc + * @rport + * @sdev + * + */ +static void +mptfc_dump_lun_info(MPT_ADAPTER *ioc, struct fc_rport *rport, struct scsi_device *sdev, + VirtTarget *vtarget) +{ + u64 nn, pn; + struct mptfc_rport_info *ri; + + ri = *((struct mptfc_rport_info **)rport->dd_data); + pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; + nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT + "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, " + "CurrentTargetID %d, %x %llx %llx\n", + ioc->name, + sdev->host->host_no, + vtarget->num_luns, + sdev->id, ri->pg0.CurrentTargetID, + ri->pg0.PortIdentifier, + (unsigned long long)pn, + (unsigned long long)nn)); +} + + +/* + * OS entry point to allow host driver to alloc memory + * for each scsi device. Called once per device the bus scan. + * Return non-zero if allocation fails. + * Init memory once per LUN. 
+ */ +static int +mptfc_slave_alloc(struct scsi_device *sdev) +{ + MPT_SCSI_HOST *hd; + VirtTarget *vtarget; + VirtDevice *vdevice; + struct scsi_target *starget; + struct fc_rport *rport; + MPT_ADAPTER *ioc; + + starget = scsi_target(sdev); + rport = starget_to_rport(starget); + + if (!rport || fc_remote_port_chkready(rport)) + return -ENXIO; + + hd = shost_priv(sdev->host); + ioc = hd->ioc; + + vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL); + if (!vdevice) { + printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n", + ioc->name, sizeof(VirtDevice)); + return -ENOMEM; + } + + + sdev->hostdata = vdevice; + vtarget = starget->hostdata; + + if (vtarget->num_luns == 0) { + vtarget->ioc_id = ioc->id; + vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; + } + + vdevice->vtarget = vtarget; + vdevice->lun = sdev->lun; + + vtarget->num_luns++; + + + mptfc_dump_lun_info(ioc, rport, sdev, vtarget); + + return 0; +} + +static int +mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt) +{ + struct mptfc_rport_info *ri; + struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); + int err; + VirtDevice *vdevice = SCpnt->device->hostdata; + + if (!vdevice || !vdevice->vtarget) { + SCpnt->result = DID_NO_CONNECT << 16; + SCpnt->scsi_done(SCpnt); + return 0; + } + + err = fc_remote_port_chkready(rport); + if (unlikely(err)) { + SCpnt->result = err; + SCpnt->scsi_done(SCpnt); + return 0; + } + + /* dd_data is null until finished adding target */ + ri = *((struct mptfc_rport_info **)rport->dd_data); + if (unlikely(!ri)) { + SCpnt->result = DID_IMM_RETRY << 16; + SCpnt->scsi_done(SCpnt); + return 0; + } + + return mptscsih_qcmd(SCpnt); +} + +/* + * mptfc_display_port_link_speed - displaying link speed + * @ioc: Pointer to MPT_ADAPTER structure + * @portnum: IOC Port number + * @pp0dest: port page0 data payload + * + */ +static void +mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0dest) +{ + u8 old_speed, new_speed, state; + char *old, *new; + + if (portnum >= 2) + return; + + old_speed = ioc->fc_link_speed[portnum]; + new_speed = pp0dest->CurrentSpeed; + state = pp0dest->PortState; + + if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE && + new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN) { + + old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" : + old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" : + old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" : + "Unknown"; + new = new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" : + new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" : + new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" : + "Unknown"; + if (old_speed == 0) + printk(MYIOC_s_NOTE_FMT + "FC Link Established, Speed = %s\n", + ioc->name, new); + else if (old_speed != new_speed) + printk(MYIOC_s_WARN_FMT + "FC Link Speed Change, Old Speed = %s, New Speed = %s\n", + ioc->name, old, new); + + ioc->fc_link_speed[portnum] = new_speed; + } +} + +/* + * mptfc_GetFcPortPage0 - Fetch FCPort config Page0. 
+ * @ioc: Pointer to MPT_ADAPTER structure + * @portnum: IOC Port number + * + * Return: 0 for success + * -ENOMEM if no memory available + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) + * -EINVAL portnum arg out of range (hardwired to two elements) + */ +static int +mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum) +{ + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + FCPortPage0_t *ppage0_alloc; + FCPortPage0_t *pp0dest; + dma_addr_t page0_dma; + int data_sz; + int copy_sz; + int rc; + int count = 400; + + if (portnum > 1) + return -EINVAL; + + /* Get FCPort Page 0 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; + cfg.cfghdr.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = portnum; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength == 0) + return 0; + + data_sz = hdr.PageLength * 4; + rc = -ENOMEM; + ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); + if (ppage0_alloc) { + + try_again: + memset((u8 *)ppage0_alloc, 0, data_sz); + cfg.physAddr = page0_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + /* save the data */ + pp0dest = &ioc->fc_port_page0[portnum]; + copy_sz = min_t(int, sizeof(FCPortPage0_t), data_sz); + memcpy(pp0dest, ppage0_alloc, copy_sz); + + /* + * Normalize endianness of structure data, + * by byte-swapping all > 1 byte fields! + */ + pp0dest->Flags = le32_to_cpu(pp0dest->Flags); + pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier); + pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low); + pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High); + pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low); + pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High); + pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass); + pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds); + pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed); + pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize); + pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low); + pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High); + pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low); + pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High); + pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount); + pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators); + + /* + * if still doing discovery, + * hang loose a while until finished + */ + if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) || + (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE && + (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) + == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) { + if (count-- > 0) { + msleep(100); + goto try_again; + } + printk(MYIOC_s_INFO_FMT "Firmware discovery not" + " complete.\n", + ioc->name); + } + mptfc_display_port_link_speed(ioc, portnum, pp0dest); + } + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); + } + + return rc; +} + +static int +mptfc_WriteFcPortPage1(MPT_ADAPTER *ioc, int portnum) +{ + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + int rc; + + if (portnum > 1) + return -EINVAL; + + if (!(ioc->fc_data.fc_port_page1[portnum].data)) + return -EINVAL; + + /* get fcport page 1 
header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = portnum;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength == 0)
+ return -ENODEV;
+
+ if (hdr.PageLength*4 != ioc->fc_data.fc_port_page1[portnum].pg_sz)
+ return -EINVAL;
+
+ cfg.physAddr = ioc->fc_data.fc_port_page1[portnum].dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ cfg.dir = 1;
+
+ rc = mpt_config(ioc, &cfg);
+
+ return rc;
+}
+
+static int
+mptfc_GetFcPortPage1(MPT_ADAPTER *ioc, int portnum)
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ FCPortPage1_t *page1_alloc;
+ dma_addr_t page1_dma;
+ int data_sz;
+ int rc;
+
+ if (portnum > 1)
+ return -EINVAL;
+
+ /* get fcport page 1 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = portnum;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength == 0)
+ return -ENODEV;
+
+start_over:
+
+ if (ioc->fc_data.fc_port_page1[portnum].data == NULL) {
+ data_sz = hdr.PageLength * 4;
+ if (data_sz < sizeof(FCPortPage1_t))
+ data_sz = sizeof(FCPortPage1_t);
+
+ page1_alloc = (FCPortPage1_t *) pci_alloc_consistent(ioc->pcidev,
+ data_sz,
+ &page1_dma);
+ if (!page1_alloc)
+ return -ENOMEM;
+ }
+ else {
+ page1_alloc = ioc->fc_data.fc_port_page1[portnum].data;
+ page1_dma = ioc->fc_data.fc_port_page1[portnum].dma;
+ data_sz = ioc->fc_data.fc_port_page1[portnum].pg_sz;
+ if (hdr.PageLength * 4 > data_sz) {
+ ioc->fc_data.fc_port_page1[portnum].data = NULL;
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
+ page1_alloc, page1_dma);
+ goto start_over;
+ }
+ }
+
+ memset(page1_alloc,0,data_sz);
+
+ cfg.physAddr = page1_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ ioc->fc_data.fc_port_page1[portnum].data = page1_alloc;
+ ioc->fc_data.fc_port_page1[portnum].pg_sz = data_sz;
+ ioc->fc_data.fc_port_page1[portnum].dma = page1_dma;
+ }
+ else {
+ ioc->fc_data.fc_port_page1[portnum].data = NULL;
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
+ page1_alloc, page1_dma);
+ }
+
+ return rc;
+}
+
+static void
+mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc)
+{
+ int ii;
+ FCPortPage1_t *pp1;
+
+ #define MPTFC_FW_DEVICE_TIMEOUT (1)
+ #define MPTFC_FW_IO_PEND_TIMEOUT (1)
+ #define ON_FLAGS (MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY)
+ #define OFF_FLAGS (MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS)
+
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ if (mptfc_GetFcPortPage1(ioc, ii) != 0)
+ continue;
+ pp1 = ioc->fc_data.fc_port_page1[ii].data;
+ if ((pp1->InitiatorDeviceTimeout == MPTFC_FW_DEVICE_TIMEOUT)
+ && (pp1->InitiatorIoPendTimeout == MPTFC_FW_IO_PEND_TIMEOUT)
+ && ((pp1->Flags & ON_FLAGS) == ON_FLAGS)
+ && ((pp1->Flags & OFF_FLAGS) == 0))
+ continue;
+ pp1->InitiatorDeviceTimeout = MPTFC_FW_DEVICE_TIMEOUT;
+ pp1->InitiatorIoPendTimeout = MPTFC_FW_IO_PEND_TIMEOUT;
+ pp1->Flags &= ~OFF_FLAGS;
+ pp1->Flags |= ON_FLAGS;
+ mptfc_WriteFcPortPage1(ioc, ii);
+ }
+}
+
+
+static void
+mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
+{
+ unsigned class = 0;
+ unsigned cos = 0;
+ unsigned speed;
+ unsigned port_type;
+ unsigned port_state;
+ FCPortPage0_t *pp0;
+ 
struct Scsi_Host *sh; + char *sn; + + /* don't know what to do as only one scsi (fc) host was allocated */ + if (portnum != 0) + return; + + pp0 = &ioc->fc_port_page0[portnum]; + sh = ioc->sh; + + sn = fc_host_symbolic_name(sh); + snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh", + ioc->prod_name, + MPT_FW_REV_MAGIC_ID_STRING, + ioc->facts.FWVersion.Word); + + fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN; + + fc_host_maxframe_size(sh) = pp0->MaxFrameSize; + + fc_host_node_name(sh) = + (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; + + fc_host_port_name(sh) = + (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low; + + fc_host_port_id(sh) = pp0->PortIdentifier; + + class = pp0->SupportedServiceClass; + if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1) + cos |= FC_COS_CLASS1; + if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2) + cos |= FC_COS_CLASS2; + if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3) + cos |= FC_COS_CLASS3; + fc_host_supported_classes(sh) = cos; + + if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT) + speed = FC_PORTSPEED_1GBIT; + else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT) + speed = FC_PORTSPEED_2GBIT; + else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT) + speed = FC_PORTSPEED_4GBIT; + else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT) + speed = FC_PORTSPEED_10GBIT; + else + speed = FC_PORTSPEED_UNKNOWN; + fc_host_speed(sh) = speed; + + speed = 0; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED) + speed |= FC_PORTSPEED_1GBIT; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED) + speed |= FC_PORTSPEED_2GBIT; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED) + speed |= FC_PORTSPEED_4GBIT; + if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED) + speed |= FC_PORTSPEED_10GBIT; + fc_host_supported_speeds(sh) = speed; + + port_state = FC_PORTSTATE_UNKNOWN; + if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE) + port_state = FC_PORTSTATE_ONLINE; + else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE) + port_state = FC_PORTSTATE_LINKDOWN; + fc_host_port_state(sh) = port_state; + + port_type = FC_PORTTYPE_UNKNOWN; + if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT) + port_type = FC_PORTTYPE_PTP; + else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP) + port_type = FC_PORTTYPE_LPORT; + else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP) + port_type = FC_PORTTYPE_NLPORT; + else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT) + port_type = FC_PORTTYPE_NPORT; + fc_host_port_type(sh) = port_type; + + fc_host_fabric_name(sh) = + (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ? 
+ (u64) pp0->FabricWWNN.High << 32 | (u64) pp0->FabricWWPN.Low : + (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low; + +} + +static void +mptfc_link_status_change(struct work_struct *work) +{ + MPT_ADAPTER *ioc = + container_of(work, MPT_ADAPTER, fc_rescan_work); + int ii; + + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) + (void) mptfc_GetFcPortPage0(ioc, ii); + +} + +static void +mptfc_setup_reset(struct work_struct *work) +{ + MPT_ADAPTER *ioc = + container_of(work, MPT_ADAPTER, fc_setup_reset_work); + u64 pn; + struct mptfc_rport_info *ri; + struct scsi_target *starget; + VirtTarget *vtarget; + + /* reset about to happen, delete (block) all rports */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { + ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED; + fc_remote_port_delete(ri->rport); /* won't sleep */ + ri->rport = NULL; + starget = ri->starget; + if (starget) { + vtarget = starget->hostdata; + if (vtarget) + vtarget->deleted = 1; + } + + pn = (u64)ri->pg0.WWPN.High << 32 | + (u64)ri->pg0.WWPN.Low; + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT + "mptfc_setup_reset.%d: %llx deleted\n", + ioc->name, + ioc->sh->host_no, + (unsigned long long)pn)); + } + } +} + +static void +mptfc_rescan_devices(struct work_struct *work) +{ + MPT_ADAPTER *ioc = + container_of(work, MPT_ADAPTER, fc_rescan_work); + int ii; + u64 pn; + struct mptfc_rport_info *ri; + struct scsi_target *starget; + VirtTarget *vtarget; + + /* start by tagging all ports as missing */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { + ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; + } + } + + /* + * now rescan devices known to adapter, + * will reregister existing rports + */ + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + (void) mptfc_GetFcPortPage0(ioc, ii); + mptfc_init_host_attr(ioc, ii); /* refresh */ + mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev); + } + + /* delete devices still missing */ + list_for_each_entry(ri, &ioc->fc_rports, list) { + /* if newly missing, delete it */ + if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { + + ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| + MPT_RPORT_INFO_FLAGS_MISSING); + fc_remote_port_delete(ri->rport); /* won't sleep */ + ri->rport = NULL; + starget = ri->starget; + if (starget) { + vtarget = starget->hostdata; + if (vtarget) + vtarget->deleted = 1; + } + + pn = (u64)ri->pg0.WWPN.High << 32 | + (u64)ri->pg0.WWPN.Low; + dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT + "mptfc_rescan.%d: %llx deleted\n", + ioc->name, + ioc->sh->host_no, + (unsigned long long)pn)); + } + } +} + +static int +mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct Scsi_Host *sh; + MPT_SCSI_HOST *hd; + MPT_ADAPTER *ioc; + unsigned long flags; + int ii; + int numSGE = 0; + int scale; + int ioc_cap; + int error=0; + int r; + + if ((r = mpt_attach(pdev,id)) != 0) + return r; + + ioc = pci_get_drvdata(pdev); + ioc->DoneCtx = mptfcDoneCtx; + ioc->TaskCtx = mptfcTaskCtx; + ioc->InternalCtx = mptfcInternalCtx; + + /* Added sanity check on readiness of the MPT adapter. 
+ */ + if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { + printk(MYIOC_s_WARN_FMT + "Skipping because it's not operational!\n", + ioc->name); + error = -ENODEV; + goto out_mptfc_probe; + } + + if (!ioc->active) { + printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n", + ioc->name); + error = -ENODEV; + goto out_mptfc_probe; + } + + /* Sanity check - ensure at least 1 port is INITIATOR capable + */ + ioc_cap = 0; + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + if (ioc->pfacts[ii].ProtocolFlags & + MPI_PORTFACTS_PROTOCOL_INITIATOR) + ioc_cap ++; + } + + if (!ioc_cap) { + printk(MYIOC_s_WARN_FMT + "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", + ioc->name, ioc); + return 0; + } + + sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); + + if (!sh) { + printk(MYIOC_s_WARN_FMT + "Unable to register controller with SCSI subsystem\n", + ioc->name); + error = -1; + goto out_mptfc_probe; + } + + spin_lock_init(&ioc->fc_rescan_work_lock); + INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); + INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset); + INIT_WORK(&ioc->fc_lsc_work, mptfc_link_status_change); + + spin_lock_irqsave(&ioc->FreeQlock, flags); + + /* Attach the SCSI Host to the IOC structure + */ + ioc->sh = sh; + + sh->io_port = 0; + sh->n_io_port = 0; + sh->irq = 0; + + /* set 16 byte cdb's */ + sh->max_cmd_len = 16; + + sh->max_id = ioc->pfacts->MaxDevices; + sh->max_lun = max_lun; + + /* Required entry. + */ + sh->unique_id = ioc->id; + + /* Verify that we won't exceed the maximum + * number of chain buffers + * We can optimize: ZZ = req_sz/sizeof(SGE) + * For 32bit SGE's: + * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ + * + (req_sz - 64)/sizeof(SGE) + * A slightly different algorithm is required for + * 64bit SGEs. + */ + scale = ioc->req_sz/ioc->SGE_size; + if (ioc->sg_addr_size == sizeof(u64)) { + numSGE = (scale - 1) * + (ioc->facts.MaxChainDepth-1) + scale + + (ioc->req_sz - 60) / ioc->SGE_size; + } else { + numSGE = 1 + (scale - 1) * + (ioc->facts.MaxChainDepth-1) + scale + + (ioc->req_sz - 64) / ioc->SGE_size; + } + + if (numSGE < sh->sg_tablesize) { + /* Reset this value */ + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Resetting sg_tablesize to %d from %d\n", + ioc->name, numSGE, sh->sg_tablesize)); + sh->sg_tablesize = numSGE; + } + + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + hd = shost_priv(sh); + hd->ioc = ioc; + + /* SCSI needs scsi_cmnd lookup table! + * (with size equal to req_depth*PtrSz!) + */ + ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC); + if (!ioc->ScsiLookup) { + error = -ENOMEM; + goto out_mptfc_probe; + } + spin_lock_init(&ioc->scsi_lookup_lock); + + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n", + ioc->name, ioc->ScsiLookup)); + + hd->last_queue_full = 0; + + sh->transportt = mptfc_transport_template; + error = scsi_add_host (sh, &ioc->pcidev->dev); + if(error) { + dprintk(ioc, printk(MYIOC_s_ERR_FMT + "scsi_add_host failed\n", ioc->name)); + goto out_mptfc_probe; + } + + /* initialize workqueue */ + + snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name), + "mptfc_wq_%d", sh->host_no); + ioc->fc_rescan_work_q = + alloc_ordered_workqueue(ioc->fc_rescan_work_q_name, + WQ_MEM_RECLAIM); + if (!ioc->fc_rescan_work_q) { + error = -ENOMEM; + goto out_mptfc_host; + } + + /* + * Pre-fetch FC port WWN and stuff... 
+ * (FCPortPage0_t stuff) + */ + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + (void) mptfc_GetFcPortPage0(ioc, ii); + } + mptfc_SetFcPortPage1_defaults(ioc); + + /* + * scan for rports - + * by doing it via the workqueue, some locking is eliminated + */ + + queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); + flush_workqueue(ioc->fc_rescan_work_q); + + return 0; + +out_mptfc_host: + scsi_remove_host(sh); + +out_mptfc_probe: + + mptscsih_remove(pdev); + return error; +} + +static struct pci_driver mptfc_driver = { + .name = "mptfc", + .id_table = mptfc_pci_table, + .probe = mptfc_probe, + .remove = mptfc_remove, + .shutdown = mptscsih_shutdown, +#ifdef CONFIG_PM + .suspend = mptscsih_suspend, + .resume = mptscsih_resume, +#endif +}; + +static int +mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) +{ + MPT_SCSI_HOST *hd; + u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; + unsigned long flags; + int rc=1; + + if (ioc->bus_type != FC) + return 0; + + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", + ioc->name, event)); + + if (ioc->sh == NULL || + ((hd = shost_priv(ioc->sh)) == NULL)) + return 1; + + switch (event) { + case MPI_EVENT_RESCAN: + spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); + if (ioc->fc_rescan_work_q) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); + } + spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + break; + case MPI_EVENT_LINK_STATUS_CHANGE: + spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); + if (ioc->fc_rescan_work_q) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_lsc_work); + } + spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + break; + default: + rc = mptscsih_event_process(ioc,pEvReply); + break; + } + return rc; +} + +static int +mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + int rc; + unsigned long flags; + + rc = mptscsih_ioc_reset(ioc,reset_phase); + if ((ioc->bus_type != FC) || (!rc)) + return rc; + + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + ": IOC %s_reset routed to FC host driver!\n",ioc->name, + reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); + + if (reset_phase == MPT_IOC_SETUP_RESET) { + spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); + if (ioc->fc_rescan_work_q) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_setup_reset_work); + } + spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + } + + else if (reset_phase == MPT_IOC_PRE_RESET) { + } + + else { /* MPT_IOC_POST_RESET */ + mptfc_SetFcPortPage1_defaults(ioc); + spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); + if (ioc->fc_rescan_work_q) { + queue_work(ioc->fc_rescan_work_q, + &ioc->fc_rescan_work); + } + spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + } + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer. + * + * Returns 0 for success, non-zero for failure. 
+ */
+static int __init
+mptfc_init(void)
+{
+ int error;
+
+ show_mptmod_ver(my_NAME, my_VERSION);
+
+ /* sanity check module parameters */
+ if (mptfc_dev_loss_tmo <= 0)
+ mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO;
+
+ mptfc_transport_template =
+ fc_attach_transport(&mptfc_transport_functions);
+
+ if (!mptfc_transport_template)
+ return -ENODEV;
+
+ mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER,
+ "mptscsih_scandv_complete");
+ mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER,
+ "mptscsih_scandv_complete");
+ mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER,
+ "mptscsih_scandv_complete");
+
+ mpt_event_register(mptfcDoneCtx, mptfc_event_process);
+ mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset);
+
+ error = pci_register_driver(&mptfc_driver);
+ if (error)
+ fc_release_transport(mptfc_transport_template);
+
+ return error;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptfc_remove - Remove fc infrastructure for devices
+ * @pdev: Pointer to pci_dev structure
+ *
+ */
+static void mptfc_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct mptfc_rport_info *p, *n;
+ struct workqueue_struct *work_q;
+ unsigned long flags;
+ int ii;
+
+ /* destroy workqueue */
+ if ((work_q=ioc->fc_rescan_work_q)) {
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ ioc->fc_rescan_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ destroy_workqueue(work_q);
+ }
+
+ fc_remove_host(ioc->sh);
+
+ list_for_each_entry_safe(p, n, &ioc->fc_rports, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ if (ioc->fc_data.fc_port_page1[ii].data) {
+ pci_free_consistent(ioc->pcidev,
+ ioc->fc_data.fc_port_page1[ii].pg_sz,
+ (u8 *) ioc->fc_data.fc_port_page1[ii].data,
+ ioc->fc_data.fc_port_page1[ii].dma);
+ ioc->fc_data.fc_port_page1[ii].data = NULL;
+ }
+ }
+
+ scsi_remove_host(ioc->sh);
+
+ mptscsih_remove(pdev);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptfc_exit - Unregisters MPT adapter(s)
+ *
+ */
+static void __exit
+mptfc_exit(void)
+{
+ pci_unregister_driver(&mptfc_driver);
+ fc_release_transport(mptfc_transport_template);
+
+ mpt_reset_deregister(mptfcDoneCtx);
+ mpt_event_deregister(mptfcDoneCtx);
+
+ mpt_deregister(mptfcInternalCtx);
+ mpt_deregister(mptfcTaskCtx);
+ mpt_deregister(mptfcDoneCtx);
+}
+
+module_init(mptfc_init);
+module_exit(mptfc_exit);
diff --git a/mptsas-kmod/el8/mptlan.c b/mptsas-kmod/el8/mptlan.c
new file mode 100644
index 00000000..ebc00d47
--- /dev/null
+++ b/mptsas-kmod/el8/mptlan.c
@@ -0,0 +1,1538 @@
+/*
+ * linux/drivers/message/fusion/mptlan.c
+ * IP Over Fibre Channel device driver.
+ * For use with LSI Fibre Channel PCI chip/adapters
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 2000-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Define statements used for debugging + */ +//#define MPT_LAN_IO_DEBUG + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "mptlan.h" +#include +#include +#include +#include +#include + +#define my_VERSION MPT_LINUX_VERSION_COMMON +#define MYNAM "mptlan" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(my_VERSION); + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * MPT LAN message sizes without variable part. + */ +#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \ + (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION)) + +#define MPT_LAN_TRANSACTION32_SIZE \ + (sizeof(SGETransaction32_t) - sizeof(u32)) + +/* + * Fusion MPT LAN private structures + */ + +struct BufferControl { + struct sk_buff *skb; + dma_addr_t dma; + unsigned int len; +}; + +struct mpt_lan_priv { + MPT_ADAPTER *mpt_dev; + u8 pnum; /* Port number in the IOC. This is not a Unix network port! 
*/ + + atomic_t buckets_out; /* number of unused buckets on IOC */ + int bucketthresh; /* Send more when this many left */ + + int *mpt_txfidx; /* Free Tx Context list */ + int mpt_txfidx_tail; + spinlock_t txfidx_lock; + + int *mpt_rxfidx; /* Free Rx Context list */ + int mpt_rxfidx_tail; + spinlock_t rxfidx_lock; + + struct BufferControl *RcvCtl; /* Receive BufferControl structs */ + struct BufferControl *SendCtl; /* Send BufferControl structs */ + + int max_buckets_out; /* Max buckets to send to IOC */ + int tx_max_out; /* IOC's Tx queue len */ + + u32 total_posted; + u32 total_received; + + struct delayed_work post_buckets_task; + struct net_device *dev; + unsigned long post_buckets_active; +}; + +struct mpt_lan_ohdr { + u16 dtype; + u8 daddr[FC_ALEN]; + u16 stype; + u8 saddr[FC_ALEN]; +}; + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +/* + * Forward protos... + */ +static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, + MPT_FRAME_HDR *reply); +static int mpt_lan_open(struct net_device *dev); +static int mpt_lan_reset(struct net_device *dev); +static int mpt_lan_close(struct net_device *dev); +static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv); +static void mpt_lan_wake_post_buckets_task(struct net_device *dev, + int priority); +static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); +static int mpt_lan_receive_post_reply(struct net_device *dev, + LANReceivePostReply_t *pRecvRep); +static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg); +static int mpt_lan_send_reply(struct net_device *dev, + LANSendReply_t *pSendRep); +static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); +static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); +static unsigned short mpt_lan_type_trans(struct sk_buff *skb, + struct net_device *dev); + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Fusion MPT LAN private data + */ +static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS; + +static u32 max_buckets_out = 127; +static u32 tx_max_out_p = 127 - 16; + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * lan_reply - Handle all data sent from the hardware. + * @ioc: Pointer to MPT_ADAPTER structure + * @mf: Pointer to original MPT request frame (NULL if TurboReply) + * @reply: Pointer to MPT reply frame + * + * Returns 1 indicating original alloc'd request frame ptr + * should be freed, or 0 if it shouldn't. + */ +static int +lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) +{ + struct net_device *dev = ioc->netdev; + int FreeReqFrame = 0; + + dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n", + IOC_AND_NETDEV_NAMES_s_s(dev))); + +// dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n", +// mf, reply)); + + if (mf == NULL) { + u32 tmsg = CAST_PTR_TO_U32(reply); + + dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + tmsg)); + + switch (GET_LAN_FORM(tmsg)) { + + // NOTE! (Optimization) First case here is now caught in + // mptbase.c::mpt_interrupt() routine and callcack here + // is now skipped for this case! +#if 0 + case LAN_REPLY_FORM_MESSAGE_CONTEXT: +// dioprintk((KERN_INFO MYNAM "/lan_reply: " +// "MessageContext turbo reply received\n")); + FreeReqFrame = 1; + break; +#endif + + case LAN_REPLY_FORM_SEND_SINGLE: +// dioprintk((MYNAM "/lan_reply: " +// "calling mpt_lan_send_reply (turbo)\n")); + + // Potential BUG here? 
+ // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg); + // If/when mpt_lan_send_turbo would return 1 here, + // calling routine (mptbase.c|mpt_interrupt) + // would Oops because mf has already been set + // to NULL. So after return from this func, + // mpt_interrupt() will attempt to put (NULL) mf ptr + // item back onto its adapter FreeQ - Oops!:-( + // It's Ok, since mpt_lan_send_turbo() *currently* + // always returns 0, but..., just in case: + + (void) mpt_lan_send_turbo(dev, tmsg); + FreeReqFrame = 0; + + break; + + case LAN_REPLY_FORM_RECEIVE_SINGLE: +// dioprintk((KERN_INFO MYNAM "@lan_reply: " +// "rcv-Turbo = %08x\n", tmsg)); + mpt_lan_receive_post_turbo(dev, tmsg); + break; + + default: + printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply " + "that I don't know what to do with\n"); + + /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */ + + break; + } + + return FreeReqFrame; + } + +// msg = (u32 *) reply; +// dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n", +// le32_to_cpu(msg[0]), le32_to_cpu(msg[1]), +// le32_to_cpu(msg[2]), le32_to_cpu(msg[3]))); +// dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n", +// reply->u.hdr.Function)); + + switch (reply->u.hdr.Function) { + + case MPI_FUNCTION_LAN_SEND: + { + LANSendReply_t *pSendRep; + + pSendRep = (LANSendReply_t *) reply; + FreeReqFrame = mpt_lan_send_reply(dev, pSendRep); + break; + } + + case MPI_FUNCTION_LAN_RECEIVE: + { + LANReceivePostReply_t *pRecvRep; + + pRecvRep = (LANReceivePostReply_t *) reply; + if (pRecvRep->NumberOfContexts) { + mpt_lan_receive_post_reply(dev, pRecvRep); + if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) + FreeReqFrame = 1; + } else + dioprintk((KERN_INFO MYNAM "@lan_reply: zero context " + "ReceivePostReply received.\n")); + break; + } + + case MPI_FUNCTION_LAN_RESET: + /* Just a default reply. Might want to check it to + * make sure that everything went ok. + */ + FreeReqFrame = 1; + break; + + case MPI_FUNCTION_EVENT_NOTIFICATION: + case MPI_FUNCTION_EVENT_ACK: + /* _EVENT_NOTIFICATION should NOT come down this path any more. + * Should be routed to mpt_lan_event_process(), but just in case... + */ + FreeReqFrame = 1; + break; + + default: + printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo " + "reply that I don't know what to do with\n"); + + /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */ + FreeReqFrame = 1; + + break; + } + + return FreeReqFrame; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + struct net_device *dev = ioc->netdev; + struct mpt_lan_priv *priv; + + if (dev == NULL) + return(1); + else + priv = netdev_priv(dev); + + dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n", + reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); + + if (priv->mpt_rxfidx == NULL) + return (1); + + if (reset_phase == MPT_IOC_SETUP_RESET) { + ; + } else if (reset_phase == MPT_IOC_PRE_RESET) { + int i; + unsigned long flags; + + netif_stop_queue(dev); + + dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name)); + + atomic_set(&priv->buckets_out, 0); + + /* Reset Rx Free Tail index and re-populate the queue. 
*/ + spin_lock_irqsave(&priv->rxfidx_lock, flags); + priv->mpt_rxfidx_tail = -1; + for (i = 0; i < priv->max_buckets_out; i++) + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + } else { + mpt_lan_post_receive_buckets(priv); + netif_wake_queue(dev); + } + + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) +{ + dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n")); + + switch (le32_to_cpu(pEvReply->Event)) { + case MPI_EVENT_NONE: /* 00 */ + case MPI_EVENT_LOG_DATA: /* 01 */ + case MPI_EVENT_STATE_CHANGE: /* 02 */ + case MPI_EVENT_UNIT_ATTENTION: /* 03 */ + case MPI_EVENT_IOC_BUS_RESET: /* 04 */ + case MPI_EVENT_EXT_BUS_RESET: /* 05 */ + case MPI_EVENT_RESCAN: /* 06 */ + /* Ok, do we need to do anything here? As far as + I can tell, this is when a new device gets added + to the loop. */ + case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */ + case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */ + case MPI_EVENT_LOGOUT: /* 09 */ + case MPI_EVENT_EVENT_CHANGE: /* 0A */ + default: + break; + } + + /* + * NOTE: pEvent->AckRequired handling now done in mptbase.c; + * Do NOT do it here now! + */ + + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_open(struct net_device *dev) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + int i; + + if (mpt_lan_reset(dev) != 0) { + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + + printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed."); + + if (mpt_dev->active) + printk ("The ioc is active. Perhaps it needs to be" + " reset?\n"); + else + printk ("The ioc in inactive, most likely in the " + "process of being reset. Please try again in " + "a moment.\n"); + } + + priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int), + GFP_KERNEL); + if (priv->mpt_txfidx == NULL) + goto out; + priv->mpt_txfidx_tail = -1; + + priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl), + GFP_KERNEL); + if (priv->SendCtl == NULL) + goto out_mpt_txfidx; + for (i = 0; i < priv->tx_max_out; i++) + priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i; + + dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n")); + + priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int), + GFP_KERNEL); + if (priv->mpt_rxfidx == NULL) + goto out_SendCtl; + priv->mpt_rxfidx_tail = -1; + + priv->RcvCtl = kcalloc(priv->max_buckets_out, + sizeof(struct BufferControl), + GFP_KERNEL); + if (priv->RcvCtl == NULL) + goto out_mpt_rxfidx; + for (i = 0; i < priv->max_buckets_out; i++) + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; + +/**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - ")); +/**/ for (i = 0; i < priv->tx_max_out; i++) +/**/ dlprintk((" %xh", priv->mpt_txfidx[i])); +/**/ dlprintk(("\n")); + + dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); + + mpt_lan_post_receive_buckets(priv); + printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", + IOC_AND_NETDEV_NAMES_s_s(dev)); + + if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) { + printk (KERN_WARNING MYNAM "/lo: Unable to register for Event" + " Notifications. This is a bad thing! 
We're not going " + "to go ahead, but I'd be leery of system stability at " + "this point.\n"); + } + + netif_start_queue(dev); + dlprintk((KERN_INFO MYNAM "/lo: Done.\n")); + + return 0; +out_mpt_rxfidx: + kfree(priv->mpt_rxfidx); + priv->mpt_rxfidx = NULL; +out_SendCtl: + kfree(priv->SendCtl); + priv->SendCtl = NULL; +out_mpt_txfidx: + kfree(priv->mpt_txfidx); + priv->mpt_txfidx = NULL; +out: return -ENOMEM; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Send a LanReset message to the FW. This should result in the FW returning + any buckets it still has. */ +static int +mpt_lan_reset(struct net_device *dev) +{ + MPT_FRAME_HDR *mf; + LANResetRequest_t *pResetReq; + struct mpt_lan_priv *priv = netdev_priv(dev); + + mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev); + + if (mf == NULL) { +/* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! " + "Unable to allocate a request frame.\n")); +*/ + return -1; + } + + pResetReq = (LANResetRequest_t *) mf; + + pResetReq->Function = MPI_FUNCTION_LAN_RESET; + pResetReq->ChainOffset = 0; + pResetReq->Reserved = 0; + pResetReq->PortNumber = priv->pnum; + pResetReq->MsgFlags = 0; + pResetReq->Reserved2 = 0; + + mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf); + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_close(struct net_device *dev) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + unsigned long timeout; + int i; + + dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n")); + + mpt_event_deregister(LanCtx); + + dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets " + "since driver was loaded, %d still out\n", + priv->total_posted,atomic_read(&priv->buckets_out))); + + netif_stop_queue(dev); + + mpt_lan_reset(dev); + + timeout = jiffies + 2 * HZ; + while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout)) + schedule_timeout_interruptible(1); + + for (i = 0; i < priv->max_buckets_out; i++) { + if (priv->RcvCtl[i].skb != NULL) { +/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " +/**/ "is still out\n", i)); + pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma, + priv->RcvCtl[i].len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(priv->RcvCtl[i].skb); + } + } + + kfree(priv->RcvCtl); + kfree(priv->mpt_rxfidx); + + for (i = 0; i < priv->tx_max_out; i++) { + if (priv->SendCtl[i].skb != NULL) { + pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma, + priv->SendCtl[i].len, + PCI_DMA_TODEVICE); + dev_kfree_skb(priv->SendCtl[i].skb); + } + } + + kfree(priv->SendCtl); + kfree(priv->mpt_txfidx); + + atomic_set(&priv->buckets_out, 0); + + printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n", + IOC_AND_NETDEV_NAMES_s_s(dev)); + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Tx timeout handler. 
*/ +static void +mpt_lan_tx_timeout(struct net_device *dev) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + + if (mpt_dev->active) { + dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name)); + netif_wake_queue(dev); + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +//static inline int +static int +mpt_lan_send_turbo(struct net_device *dev, u32 tmsg) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + struct sk_buff *sent; + unsigned long flags; + u32 ctx; + + ctx = GET_LAN_BUFFER_CONTEXT(tmsg); + sent = priv->SendCtl[ctx].skb; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += sent->len; + + dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + __func__, sent)); + + priv->SendCtl[ctx].skb = NULL; + pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, + priv->SendCtl[ctx].len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(sent); + + spin_lock_irqsave(&priv->txfidx_lock, flags); + priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx; + spin_unlock_irqrestore(&priv->txfidx_lock, flags); + + netif_wake_queue(dev); + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + struct sk_buff *sent; + unsigned long flags; + int FreeReqFrame = 0; + u32 *pContext; + u32 ctx; + u8 count; + + count = pSendRep->NumberOfContexts; + + dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n", + le16_to_cpu(pSendRep->IOCStatus))); + + /* Add check for Loginfo Flag in IOCStatus */ + + switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) { + case MPI_IOCSTATUS_SUCCESS: + dev->stats.tx_packets += count; + break; + + case MPI_IOCSTATUS_LAN_CANCELED: + case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED: + break; + + case MPI_IOCSTATUS_INVALID_SGL: + dev->stats.tx_errors += count; + printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n", + IOC_AND_NETDEV_NAMES_s_s(dev)); + goto out; + + default: + dev->stats.tx_errors += count; + break; + } + + pContext = &pSendRep->BufferContext; + + spin_lock_irqsave(&priv->txfidx_lock, flags); + while (count > 0) { + ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext)); + + sent = priv->SendCtl[ctx].skb; + dev->stats.tx_bytes += sent->len; + + dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + __func__, sent)); + + priv->SendCtl[ctx].skb = NULL; + pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, + priv->SendCtl[ctx].len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(sent); + + priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx; + + pContext++; + count--; + } + spin_unlock_irqrestore(&priv->txfidx_lock, flags); + +out: + if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) + FreeReqFrame = 1; + + netif_wake_queue(dev); + return FreeReqFrame; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static netdev_tx_t +mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + MPT_FRAME_HDR *mf; + LANSendRequest_t *pSendReq; + SGETransaction32_t *pTrans; + SGESimple64_t *pSimple; + const unsigned char *mac; + dma_addr_t dma; + unsigned long flags; + int ctx; + u16 cur_naa = 0x1000; + + 
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n", + __func__, skb)); + + spin_lock_irqsave(&priv->txfidx_lock, flags); + if (priv->mpt_txfidx_tail < 0) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&priv->txfidx_lock, flags); + + printk (KERN_ERR "%s: no tx context available: %u\n", + __func__, priv->mpt_txfidx_tail); + return NETDEV_TX_BUSY; + } + + mf = mpt_get_msg_frame(LanCtx, mpt_dev); + if (mf == NULL) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&priv->txfidx_lock, flags); + + printk (KERN_ERR "%s: Unable to alloc request frame\n", + __func__); + return NETDEV_TX_BUSY; + } + + ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--]; + spin_unlock_irqrestore(&priv->txfidx_lock, flags); + +// dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n", +// IOC_AND_NETDEV_NAMES_s_s(dev))); + + pSendReq = (LANSendRequest_t *) mf; + + /* Set the mac.raw pointer, since this apparently isn't getting + * done before we get the skb. Pull the data pointer past the mac data. + */ + skb_reset_mac_header(skb); + skb_pull(skb, 12); + + dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len, + PCI_DMA_TODEVICE); + + priv->SendCtl[ctx].skb = skb; + priv->SendCtl[ctx].dma = dma; + priv->SendCtl[ctx].len = skb->len; + + /* Message Header */ + pSendReq->Reserved = 0; + pSendReq->Function = MPI_FUNCTION_LAN_SEND; + pSendReq->ChainOffset = 0; + pSendReq->Reserved2 = 0; + pSendReq->MsgFlags = 0; + pSendReq->PortNumber = priv->pnum; + + /* Transaction Context Element */ + pTrans = (SGETransaction32_t *) pSendReq->SG_List; + + /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */ + pTrans->ContextSize = sizeof(u32); + pTrans->DetailsLength = 2 * sizeof(u32); + pTrans->Flags = 0; + pTrans->TransactionContext[0] = cpu_to_le32(ctx); + +// dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n", +// IOC_AND_NETDEV_NAMES_s_s(dev), +// ctx, skb, skb->data)); + + mac = skb_mac_header(skb); + + pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | + (mac[0] << 8) | + (mac[1] << 0)); + pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) | + (mac[3] << 16) | + (mac[4] << 8) | + (mac[5] << 0)); + + pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; + + /* If we ever decide to send more than one Simple SGE per LANSend, then + we will need to make sure that LAST_ELEMENT only gets set on the + last one. Otherwise, bad voodoo and evil funkiness will commence. */ + pSimple->FlagsLength = cpu_to_le32( + ((MPI_SGE_FLAGS_LAST_ELEMENT | + MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_SYSTEM_ADDRESS | + MPI_SGE_FLAGS_HOST_TO_IOC | + MPI_SGE_FLAGS_64_BIT_ADDRESSING | + MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) | + skb->len); + pSimple->Address.Low = cpu_to_le32((u32) dma); + if (sizeof(dma_addr_t) > sizeof(u32)) + pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32)); + else + pSimple->Address.High = 0; + + mpt_put_msg_frame (LanCtx, mpt_dev, mf); + netif_trans_update(dev); + + dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. 
FlagsLength = %08x.\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + le32_to_cpu(pSimple->FlagsLength))); + + return NETDEV_TX_OK; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static void +mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) +/* + * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue + */ +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + + if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { + if (priority) { + schedule_delayed_work(&priv->post_buckets_task, 0); + } else { + schedule_delayed_work(&priv->post_buckets_task, 1); + dioprintk((KERN_INFO MYNAM ": post_buckets queued on " + "timer.\n")); + } + dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n", + IOC_AND_NETDEV_NAMES_s_s(dev) )); + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + + skb->protocol = mpt_lan_type_trans(skb, dev); + + dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) " + "delivered to upper level.\n", + IOC_AND_NETDEV_NAMES_s_s(dev), skb->len)); + + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + + skb->dev = dev; + netif_rx(skb); + + dioprintk((MYNAM "/receive_skb: %d buckets remaining\n", + atomic_read(&priv->buckets_out))); + + if (atomic_read(&priv->buckets_out) < priv->bucketthresh) + mpt_lan_wake_post_buckets_task(dev, 1); + + dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets " + "remaining, %d received back since sod\n", + atomic_read(&priv->buckets_out), priv->total_received)); + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +//static inline int +static int +mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + struct sk_buff *skb, *old_skb; + unsigned long flags; + u32 ctx, len; + + ctx = GET_LAN_BUCKET_CONTEXT(tmsg); + skb = priv->RcvCtl[ctx].skb; + + len = GET_LAN_PACKET_LENGTH(tmsg); + + if (len < MPT_LAN_RX_COPYBREAK) { + old_skb = skb; + + skb = (struct sk_buff *)dev_alloc_skb(len); + if (!skb) { + printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! 
(%s@%d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + __FILE__, __LINE__); + return -ENOMEM; + } + + pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); + + skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); + + pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); + goto out; + } + + skb_put(skb, len); + + priv->RcvCtl[ctx].skb = NULL; + + pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); + +out: + spin_lock_irqsave(&priv->rxfidx_lock, flags); + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + + atomic_dec(&priv->buckets_out); + priv->total_received++; + + return mpt_lan_receive_skb(dev, skb); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_receive_post_free(struct net_device *dev, + LANReceivePostReply_t *pRecvRep) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + unsigned long flags; + struct sk_buff *skb; + u32 ctx; + int count; + int i; + + count = pRecvRep->NumberOfContexts; + +/**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: " + "IOC returned %d buckets, freeing them...\n", count)); + + spin_lock_irqsave(&priv->rxfidx_lock, flags); + for (i = 0; i < count; i++) { + ctx = le32_to_cpu(pRecvRep->BucketContext[i]); + + skb = priv->RcvCtl[ctx].skb; + +// dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n", +// IOC_AND_NETDEV_NAMES_s_s(dev))); +// dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p", +// priv, &(priv->buckets_out))); +// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n")); + + priv->RcvCtl[ctx].skb = NULL; + pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; + } + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + + atomic_sub(count, &priv->buckets_out); + +// for (i = 0; i < priv->max_buckets_out; i++) +// if (priv->RcvCtl[i].skb != NULL) +// dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x " +// "is still out\n", i)); + +/* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n", + count)); +*/ +/**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets " +/**/ "remaining, %d received back since sod.\n", +/**/ atomic_read(&priv->buckets_out), priv->total_received)); + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mpt_lan_receive_post_reply(struct net_device *dev, + LANReceivePostReply_t *pRecvRep) +{ + struct mpt_lan_priv *priv = netdev_priv(dev); + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + struct sk_buff *skb, *old_skb; + unsigned long flags; + u32 len, ctx, offset; + u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining); + int count; + int i, l; + + dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n")); + dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n", + le16_to_cpu(pRecvRep->IOCStatus))); + + if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) == + MPI_IOCSTATUS_LAN_CANCELED) + return mpt_lan_receive_post_free(dev, pRecvRep); + + len = le32_to_cpu(pRecvRep->PacketLength); + if (len == 0) { + printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO " + "ReceivePostReply w/ PacketLength zero!\n", + IOC_AND_NETDEV_NAMES_s_s(dev)); + printk (KERN_ERR MYNAM ": MsgFlags = %02x, 
IOCStatus = %04x\n", + pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus)); + return -1; + } + + ctx = le32_to_cpu(pRecvRep->BucketContext[0]); + count = pRecvRep->NumberOfContexts; + skb = priv->RcvCtl[ctx].skb; + + offset = le32_to_cpu(pRecvRep->PacketOffset); +// if (offset != 0) { +// printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply " +// "w/ PacketOffset %u\n", +// IOC_AND_NETDEV_NAMES_s_s(dev), +// offset); +// } + + dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + offset, len)); + + if (count > 1) { + int szrem = len; + +// dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned " +// "for single packet, concatenating...\n", +// IOC_AND_NETDEV_NAMES_s_s(dev))); + + skb = (struct sk_buff *)dev_alloc_skb(len); + if (!skb) { + printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + __FILE__, __LINE__); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->rxfidx_lock, flags); + for (i = 0; i < count; i++) { + + ctx = le32_to_cpu(pRecvRep->BucketContext[i]); + old_skb = priv->RcvCtl[ctx].skb; + + l = priv->RcvCtl[ctx].len; + if (szrem < l) + l = szrem; + +// dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n", +// IOC_AND_NETDEV_NAMES_s_s(dev), +// i, l)); + + pci_dma_sync_single_for_cpu(mpt_dev->pcidev, + priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, + PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(old_skb, skb_put(skb, l), l); + + pci_dma_sync_single_for_device(mpt_dev->pcidev, + priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, + PCI_DMA_FROMDEVICE); + + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; + szrem -= l; + } + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + + } else if (len < MPT_LAN_RX_COPYBREAK) { + + old_skb = skb; + + skb = (struct sk_buff *)dev_alloc_skb(len); + if (!skb) { + printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + __FILE__, __LINE__); + return -ENOMEM; + } + + pci_dma_sync_single_for_cpu(mpt_dev->pcidev, + priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, + PCI_DMA_FROMDEVICE); + + skb_copy_from_linear_data(old_skb, skb_put(skb, len), len); + + pci_dma_sync_single_for_device(mpt_dev->pcidev, + priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, + PCI_DMA_FROMDEVICE); + + spin_lock_irqsave(&priv->rxfidx_lock, flags); + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + + } else { + spin_lock_irqsave(&priv->rxfidx_lock, flags); + + priv->RcvCtl[ctx].skb = NULL; + + pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE); + priv->RcvCtl[ctx].dma = 0; + + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + + skb_put(skb,len); + } + + atomic_sub(count, &priv->buckets_out); + priv->total_received += count; + + if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) { + printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, " + "MPT_LAN_MAX_BUCKETS_OUT = %d\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + priv->mpt_rxfidx_tail, + MPT_LAN_MAX_BUCKETS_OUT); + + return -1; + } + + if (remaining == 0) + printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! " + "(priv->buckets_out = %d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + atomic_read(&priv->buckets_out)); + else if (remaining < 10) + printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. 
" + "(priv->buckets_out = %d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + remaining, atomic_read(&priv->buckets_out)); + + if ((remaining < priv->bucketthresh) && + ((atomic_read(&priv->buckets_out) - remaining) > + MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) { + + printk (KERN_WARNING MYNAM " Mismatch between driver's " + "buckets_out count and fw's BucketsRemaining " + "count has crossed the threshold, issuing a " + "LanReset to clear the fw's hashtable. You may " + "want to check your /var/log/messages for \"CRC " + "error\" event notifications.\n"); + + mpt_lan_reset(dev); + mpt_lan_wake_post_buckets_task(dev, 0); + } + + return mpt_lan_receive_skb(dev, skb); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Simple SGE's only at the moment */ + +static void +mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv) +{ + struct net_device *dev = priv->dev; + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + MPT_FRAME_HDR *mf; + LANReceivePostRequest_t *pRecvReq; + SGETransaction32_t *pTrans; + SGESimple64_t *pSimple; + struct sk_buff *skb; + dma_addr_t dma; + u32 curr, buckets, count, max; + u32 len = (dev->mtu + dev->hard_header_len + 4); + unsigned long flags; + int i; + + curr = atomic_read(&priv->buckets_out); + buckets = (priv->max_buckets_out - curr); + + dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + __func__, buckets, curr)); + + max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) / + (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t)); + + while (buckets) { + mf = mpt_get_msg_frame(LanCtx, mpt_dev); + if (mf == NULL) { + printk (KERN_ERR "%s: Unable to alloc request frame\n", + __func__); + dioprintk((KERN_ERR "%s: %u buckets remaining\n", + __func__, buckets)); + goto out; + } + pRecvReq = (LANReceivePostRequest_t *) mf; + + i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + mpt_dev->RequestNB[i] = 0; + count = buckets; + if (count > max) + count = max; + + pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE; + pRecvReq->ChainOffset = 0; + pRecvReq->MsgFlags = 0; + pRecvReq->PortNumber = priv->pnum; + + pTrans = (SGETransaction32_t *) pRecvReq->SG_List; + pSimple = NULL; + + for (i = 0; i < count; i++) { + int ctx; + + spin_lock_irqsave(&priv->rxfidx_lock, flags); + if (priv->mpt_rxfidx_tail < 0) { + printk (KERN_ERR "%s: Can't alloc context\n", + __func__); + spin_unlock_irqrestore(&priv->rxfidx_lock, + flags); + break; + } + + ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--]; + + skb = priv->RcvCtl[ctx].skb; + if (skb && (priv->RcvCtl[ctx].len != len)) { + pci_unmap_single(mpt_dev->pcidev, + priv->RcvCtl[ctx].dma, + priv->RcvCtl[ctx].len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(priv->RcvCtl[ctx].skb); + skb = priv->RcvCtl[ctx].skb = NULL; + } + + if (skb == NULL) { + skb = dev_alloc_skb(len); + if (skb == NULL) { + printk (KERN_WARNING + MYNAM "/%s: Can't alloc skb\n", + __func__); + priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + break; + } + + dma = pci_map_single(mpt_dev->pcidev, skb->data, + len, PCI_DMA_FROMDEVICE); + + priv->RcvCtl[ctx].skb = skb; + priv->RcvCtl[ctx].dma = dma; + priv->RcvCtl[ctx].len = len; + } + + spin_unlock_irqrestore(&priv->rxfidx_lock, flags); + + pTrans->ContextSize = sizeof(u32); + pTrans->DetailsLength = 0; + pTrans->Flags = 0; + pTrans->TransactionContext[0] = cpu_to_le32(ctx); + + pSimple = (SGESimple64_t *) pTrans->TransactionDetails; + + pSimple->FlagsLength = cpu_to_le32( + 
((MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len); + pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma); + if (sizeof(dma_addr_t) > sizeof(u32)) + pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32)); + else + pSimple->Address.High = 0; + + pTrans = (SGETransaction32_t *) (pSimple + 1); + } + + if (pSimple == NULL) { +/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n", +/**/ __func__); + mpt_free_msg_frame(mpt_dev, mf); + goto out; + } + + pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT); + + pRecvReq->BucketCount = cpu_to_le32(i); + +/* printk(KERN_INFO MYNAM ": posting buckets\n "); + * for (i = 0; i < j + 2; i ++) + * printk (" %08x", le32_to_cpu(msg[i])); + * printk ("\n"); + */ + + mpt_put_msg_frame(LanCtx, mpt_dev, mf); + + priv->total_posted += i; + buckets -= i; + atomic_add(i, &priv->buckets_out); + } + +out: + dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n", + __func__, buckets, atomic_read(&priv->buckets_out))); + dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n", + __func__, priv->total_posted, priv->total_received)); + + clear_bit(0, &priv->post_buckets_active); +} + +static void +mpt_lan_post_receive_buckets_work(struct work_struct *work) +{ + mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv, + post_buckets_task.work)); +} + +static const struct net_device_ops mpt_netdev_ops = { + .ndo_open = mpt_lan_open, + .ndo_stop = mpt_lan_close, + .ndo_start_xmit = mpt_lan_sdu_send, + .ndo_tx_timeout = mpt_lan_tx_timeout, +}; + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static struct net_device * +mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) +{ + struct net_device *dev; + struct mpt_lan_priv *priv; + u8 HWaddr[FC_ALEN], *a; + + dev = alloc_fcdev(sizeof(struct mpt_lan_priv)); + if (!dev) + return NULL; + + dev->mtu = MPT_LAN_MTU; + + priv = netdev_priv(dev); + + priv->dev = dev; + priv->mpt_dev = mpt_dev; + priv->pnum = pnum; + + INIT_DELAYED_WORK(&priv->post_buckets_task, + mpt_lan_post_receive_buckets_work); + priv->post_buckets_active = 0; + + dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", + __LINE__, dev->mtu + dev->hard_header_len + 4)); + + atomic_set(&priv->buckets_out, 0); + priv->total_posted = 0; + priv->total_received = 0; + priv->max_buckets_out = max_buckets_out; + if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out) + priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets; + + dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n", + __LINE__, + mpt_dev->pfacts[0].MaxLanBuckets, + max_buckets_out, + priv->max_buckets_out)); + + priv->bucketthresh = priv->max_buckets_out * 2 / 3; + spin_lock_init(&priv->txfidx_lock); + spin_lock_init(&priv->rxfidx_lock); + + /* Grab pre-fetched LANPage1 stuff. :-) */ + a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow; + + HWaddr[0] = a[5]; + HWaddr[1] = a[4]; + HWaddr[2] = a[3]; + HWaddr[3] = a[2]; + HWaddr[4] = a[1]; + HWaddr[5] = a[0]; + + dev->addr_len = FC_ALEN; + memcpy(dev->dev_addr, HWaddr, FC_ALEN); + memset(dev->broadcast, 0xff, FC_ALEN); + + /* The Tx queue is 127 deep on the 909. + * Give ourselves some breathing room. + */ + priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ? 
+ tx_max_out_p : MPT_TX_MAX_OUT_LIM; + + dev->netdev_ops = &mpt_netdev_ops; + dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT; + + /* MTU range: 96 - 65280 */ + dev->min_mtu = MPT_LAN_MIN_MTU; + dev->max_mtu = MPT_LAN_MAX_MTU; + + dlprintk((KERN_INFO MYNAM ": Finished registering dev " + "and setting initial values\n")); + + if (register_netdev(dev) != 0) { + free_netdev(dev); + dev = NULL; + } + return dev; +} + +static int +mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + struct net_device *dev; + int i; + + for (i = 0; i < ioc->facts.NumberOfPorts; i++) { + printk(KERN_INFO MYNAM ": %s: PortNum=%x, " + "ProtocolFlags=%02Xh (%c%c%c%c)\n", + ioc->name, ioc->pfacts[i].PortNumber, + ioc->pfacts[i].ProtocolFlags, + MPT_PROTOCOL_FLAGS_c_c_c_c( + ioc->pfacts[i].ProtocolFlags)); + + if (!(ioc->pfacts[i].ProtocolFlags & + MPI_PORTFACTS_PROTOCOL_LAN)) { + printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol " + "seems to be disabled on this adapter port!\n", + ioc->name); + continue; + } + + dev = mpt_register_lan_device(ioc, i); + if (!dev) { + printk(KERN_ERR MYNAM ": %s: Unable to register " + "port%d as a LAN device\n", ioc->name, + ioc->pfacts[i].PortNumber); + continue; + } + + printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device " + "registered as '%s'\n", ioc->name, dev->name); + printk(KERN_INFO MYNAM ": %s/%s: " + "LanAddr = %pM\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + dev->dev_addr); + + ioc->netdev = dev; + + return 0; + } + + return -ENODEV; +} + +static void +mptlan_remove(struct pci_dev *pdev) +{ + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + struct net_device *dev = ioc->netdev; + + if(dev != NULL) { + unregister_netdev(dev); + free_netdev(dev); + } +} + +static struct mpt_pci_driver mptlan_driver = { + .probe = mptlan_probe, + .remove = mptlan_remove, +}; + +static int __init mpt_lan_init (void) +{ + show_mptmod_ver(LANAME, LANVER); + + LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER, + "lan_reply"); + if (LanCtx <= 0) { + printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n"); + return -EBUSY; + } + + dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx)); + + if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) { + printk(KERN_ERR MYNAM ": Eieee! unable to register a reset " + "handler with mptbase! The world is at an end! " + "Everything is fading to black! 
Goodbye.\n"); + return -EBUSY; + } + + dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); + + mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER); + return 0; +} + +static void __exit mpt_lan_exit(void) +{ + mpt_device_driver_deregister(MPTLAN_DRIVER); + mpt_reset_deregister(LanCtx); + + if (LanCtx) { + mpt_deregister(LanCtx); + LanCtx = MPT_MAX_PROTOCOL_DRIVERS; + } +} + +module_init(mpt_lan_init); +module_exit(mpt_lan_exit); + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static unsigned short +mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) +{ + struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; + struct fcllc *fcllc; + + skb_reset_mac_header(skb); + skb_pull(skb, sizeof(struct mpt_lan_ohdr)); + + if (fch->dtype == htons(0xffff)) { + u32 *p = (u32 *) fch; + + swab32s(p + 0); + swab32s(p + 1); + swab32s(p + 2); + swab32s(p + 3); + + printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n", + NETDEV_PTR_TO_IOC_NAME_s(dev)); + printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n", + fch->saddr); + } + + if (*fch->daddr & 1) { + if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) { + skb->pkt_type = PACKET_BROADCAST; + } else { + skb->pkt_type = PACKET_MULTICAST; + } + } else { + if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) { + skb->pkt_type = PACKET_OTHERHOST; + } else { + skb->pkt_type = PACKET_HOST; + } + } + + fcllc = (struct fcllc *)skb->data; + + /* Strip the SNAP header from ARP packets since we don't + * pass them through to the 802.2/SNAP layers. + */ + if (fcllc->dsap == EXTENDED_SAP && + (fcllc->ethertype == htons(ETH_P_IP) || + fcllc->ethertype == htons(ETH_P_ARP))) { + skb_pull(skb, sizeof(struct fcllc)); + return fcllc->ethertype; + } + + return htons(ETH_P_802_2); +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/mptsas-kmod/el8/mptlan.h b/mptsas-kmod/el8/mptlan.h new file mode 100644 index 00000000..8a24494f --- /dev/null +++ b/mptsas-kmod/el8/mptlan.h @@ -0,0 +1,129 @@ +/* + * linux/drivers/message/fusion/mptlan.h + * IP Over Fibre Channel device driver. + * For use with LSI Fibre Channel PCI chip/adapters + * running LSI Fusion MPT (Message Passing Technology) firmware. + * + * Copyright (c) 2000-2008 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +/* mptlan.h */ + +#ifndef LINUX_MPTLAN_H_INCLUDED +#define LINUX_MPTLAN_H_INCLUDED +/*****************************************************************************/ + +#if !defined(__GENKSYMS__) +#include +#endif + +#include +#include +// #include +#include +// #include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + /* Override mptbase.h by pre-defining these! */ +#define MODULEAUTHOR "LSI Corporation" + +#include "mptbase.h" + +/*****************************************************************************/ +#define LANAME "Fusion MPT LAN driver" +#define LANVER MPT_LINUX_VERSION_COMMON + +#ifdef MODULE +MODULE_AUTHOR(MODULEAUTHOR); +MODULE_DESCRIPTION(LANAME); +#endif +/*****************************************************************************/ + +#define MPT_LAN_MAX_BUCKETS_OUT 256 +#define MPT_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */ +#define MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH 10 +#define MPT_LAN_RX_COPYBREAK 200 +#define MPT_LAN_TX_TIMEOUT (1*HZ) +#define MPT_TX_MAX_OUT_LIM 127 + +#define MPT_LAN_MIN_MTU 96 /* RFC2625 */ +#define MPT_LAN_MAX_MTU 65280 /* RFC2625 */ +#define MPT_LAN_MTU 13312 /* Max perf range + lower mem + usage than 16128 */ + +#define MPT_LAN_NAA_RFC2625 0x1 +#define MPT_LAN_NAA_QLOGIC 0x2 + +/* MPT LAN Reset and Suspend Resource Flags Defines */ + +#define MPT_LAN_RESOURCE_FLAG_RETURN_POSTED_BUCKETS 0x01 +#define MPT_LAN_RESOURCE_FLAG_RETURN_PEND_TRANSMITS 0x02 + +/*****************************************************************************/ +#ifdef MPT_LAN_IO_DEBUG +#define dioprintk(x) printk x +#else +#define dioprintk(x) +#endif + +#ifdef MPT_LAN_DEBUG +#define dlprintk(x) printk x +#else +#define dlprintk(x) +#endif + +#define NETDEV_TO_LANPRIV_PTR(d) ((struct mpt_lan_priv *)netdev_priv(d)) +#define NETDEV_PTR_TO_IOC_NAME_s(d) (NETDEV_TO_LANPRIV_PTR(d)->mpt_dev->name) +#define IOC_AND_NETDEV_NAMES_s_s(d) NETDEV_PTR_TO_IOC_NAME_s(d), (d)->name + +/*****************************************************************************/ +#endif + diff --git a/mptsas-kmod/el8/mptsas-kmod.spec b/mptsas-kmod/el8/mptsas-kmod.spec index 5122e858..742f2121 100644 --- a/mptsas-kmod/el8/mptsas-kmod.spec +++ b/mptsas-kmod/el8/mptsas-kmod.spec @@ -2,13 +2,13 @@ %define kmod_name mptsas # If kmod_kernel_version isn't defined on the 
rpmbuild line, define it here. -%{!?kmod_kernel_version: %define kmod_kernel_version 4.18.0-372.9.1.el8} +%{!?kmod_kernel_version: %define kmod_kernel_version 4.18.0-425.3.1.el8} %{!?dist: %define dist .el8} Name: kmod-%{kmod_name} Version: 3.04.20 -Release: 7%{?dist} +Release: 8%{?dist} Summary: %{kmod_name} kernel module(s) Group: System Environment/Kernel License: GPLv2 @@ -50,6 +50,9 @@ BuildRequires: redhat-rpm-config Provides: kernel-modules >= %{kmod_kernel_version}.%{_arch} Provides: kmod-%{kmod_name} = %{?epoch:%{epoch}:}%{version}-%{release} +# Combines and replaces kmod-mptspi +Provides: kmod-mptspi = %{?epoch:%{epoch}:}%{version}-%{release} +Obsoletes: kmod-mptspi < %{?epoch:%{epoch}:}%{version}-%{release} Requires(post): %{_sbindir}/weak-modules Requires(postun): %{_sbindir}/weak-modules @@ -63,6 +66,9 @@ of the same variant of the Linux kernel and not on any one specific build. %prep %setup -n %{kmod_name}-%{version} echo "override %{kmod_name} * weak-updates/%{kmod_name}" > kmod-%{kmod_name}.conf +echo "override mptctl * weak-updates/%{kmod_name}" >> kmod-%{kmod_name}.conf +echo "override mptfc * weak-updates/%{kmod_name}" >> kmod-%{kmod_name}.conf +echo "override mptspi * weak-updates/%{kmod_name}" >> kmod-%{kmod_name}.conf %build %{__make} -C %{kernel_source} %{?_smp_mflags} modules M=$PWD @@ -79,6 +85,9 @@ sort -u greylist | uniq > greylist.txt %install %{__install} -d %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ %{__install} %{kmod_name}.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ +%{__install} mptctl.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ +%{__install} mptfc.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ +%{__install} mptspi.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ %{__install} -d %{buildroot}%{_sysconfdir}/depmod.d/ %{__install} -m 0644 kmod-%{kmod_name}.conf %{buildroot}%{_sysconfdir}/depmod.d/ %{__install} -d %{buildroot}%{_defaultdocdir}/kmod-%{kmod_name}-%{version}/ @@ -176,6 +185,11 @@ exit 0 %doc /usr/share/doc/kmod-%{kmod_name}-%{version}/ %changelog +* Sat Jan 14 2023 Philip J Perry 3.04.20-8 +- Rebuilt against RHEL 8.7 kernel +- Obsoletes kmod-mptspi +- Added mptctl and mptfc modules [https://elrepo.org/bugs/view.php?id=1314] + * Tue May 10 2022 Philip J Perry 3.04.20-7 - Rebuilt for RHEL 8.6 diff --git a/mptsas-kmod/el9/Makefile b/mptsas-kmod/el9/Makefile index 78e19d97..ac5d5eae 100644 --- a/mptsas-kmod/el9/Makefile +++ b/mptsas-kmod/el9/Makefile @@ -31,5 +31,6 @@ else obj-m += mptbase.o mptscsih.o mptspi.o obj-m += mptbase.o mptscsih.o mptfc.o obj-m += mptbase.o mptscsih.o mptsas.o +obj-m += mptctl.o endif diff --git a/mptsas-kmod/el9/kmod-mptsas.spec b/mptsas-kmod/el9/kmod-mptsas.spec index c2b71c8c..ad07f6f9 100644 --- a/mptsas-kmod/el9/kmod-mptsas.spec +++ b/mptsas-kmod/el9/kmod-mptsas.spec @@ -8,7 +8,7 @@ Name: kmod-%{kmod_name} Version: 3.04.20 -Release: 2%{?dist} +Release: 3%{?dist} Summary: %{kmod_name} kernel module(s) Group: System Environment/Kernel License: GPLv2 @@ -80,6 +80,7 @@ of the same variant of the Linux kernel and not on any one specific build. 
%prep %setup -q -n %{kmod_name}-%{version} echo "override %{kmod_name} * weak-updates/%{kmod_name}" > kmod-%{kmod_name}.conf +echo "override mptctl * weak-updates/%{kmod_name}" >> kmod-%{kmod_name}.conf echo "override mptfc * weak-updates/%{kmod_name}" >> kmod-%{kmod_name}.conf echo "override mptspi * weak-updates/%{kmod_name}" >> kmod-%{kmod_name}.conf @@ -102,6 +103,7 @@ sort -u greylist | uniq > greylist.txt %install %{__install} -d %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ %{__install} %{kmod_name}.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ +%{__install} mptctl.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ %{__install} mptfc.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ %{__install} mptspi.ko %{buildroot}/lib/modules/%{kmod_kernel_version}.%{_arch}/extra/%{kmod_name}/ %{__install} -d %{buildroot}%{_sysconfdir}/depmod.d/ @@ -201,6 +203,9 @@ exit 0 %doc /usr/share/doc/kmod-%{kmod_name}-%{version}/ %changelog +* Sat Jan 14 2023 Philip J Perry - 3.04.20-3 +- Added mptctl module [https://elrepo.org/bugs/view.php?id=1315] + * Tue Nov 15 2022 Philip J Perry - 3.04.20-2 - Rebuilt for RHEL 9.1 - Source updated from RHEL 9.1 kernel
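
After rebuilding and installing the updated kmod package, a quick post-install sanity check can confirm that depmod's override entries resolve the merged modules to this package and that the new ioctl driver loads. The snippet below is a minimal sketch, not part of the patch: it assumes the package is installed on a host with a Fusion MPT adapter, that udev/devtmpfs creates the misc device node when mptctl loads, and that module and device names match those shipped here.

    # Confirm the overridden modules resolve to the kmod package
    # (on kernels other than the build kernel they should appear
    #  under weak-updates/mptsas once weak-modules has run)
    modinfo -n mptsas mptspi mptfc mptctl

    # Load the ioctl driver and check that its misc device shows up
    modprobe mptctl
    ls -l /dev/mptctl
    dmesg | tail

If modinfo still reports paths under the distribution kernel's own modules directory rather than extra/ or weak-updates/, the depmod.d override file from kmod-mptsas.conf has likely not been picked up and depmod should be re-run.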