diff --git a/COPYING-5.14.0-570.42.2.el9_6 b/COPYING-5.14.0-570.49.1.el9_6
similarity index 100%
rename from COPYING-5.14.0-570.42.2.el9_6
rename to COPYING-5.14.0-570.49.1.el9_6
diff --git a/Makefile.rhelver b/Makefile.rhelver
index 2bf3b1df48677..1578ada719273 100644
--- a/Makefile.rhelver
+++ b/Makefile.rhelver
@@ -12,7 +12,7 @@ RHEL_MINOR = 6
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 570.42.2
+RHEL_RELEASE = 570.49.1
 
 #
 # ZSTREAM
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index daa82012c6fdb..c41500c5c0a60 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1003,7 +1003,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 	return 0;
 }
 
-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 {
 	if (radix_enabled())
@@ -1011,6 +1011,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 
 	return false;
 }
+#endif
 
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
@@ -1147,6 +1148,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 	pmd_t *pmd;
 	pte_t *pte;
 
+	/*
+	 * Make sure we align the start vmemmap addr so that we calculate
+	 * the correct start_pfn in altmap boundary check to decide whether
+	 * we should use altmap or RAM based backing memory allocation. Also
+	 * the address needs to be aligned for the set_pte operation.
+	 *
+	 * If the start addr is already PMD_SIZE aligned we will try to use
+	 * a pmd mapping. We don't want to be too aggressive here because
+	 * that will cause more allocations in RAM. So only if the namespace
+	 * vmemmap start addr is PMD_SIZE aligned will we use a PMD mapping.
+	 */
+
+	start = ALIGN_DOWN(start, PAGE_SIZE);
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);
 
@@ -1172,8 +1186,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 		 * in altmap block allocation failures, in which case
 		 * we fallback to RAM for vmemmap allocation.
 		 */
-		if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
-			       altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+		if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+		    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 			/*
 			 * make sure we don't create altmap mappings
 			 * covering things outside the device.
diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/0620efe7.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/0620efe7.failed
new file mode 100644
index 0000000000000..28c5530baff6b
--- /dev/null
+++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/0620efe7.failed
@@ -0,0 +1,397 @@
+scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro
+
+jira LE-4311
+Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6
+commit-author Arun Easi
+commit 0620efe789a73586b5b3ed38b27d1b69b2150958
+Empty-Commit: Cherry-Pick Conflicts during history rebuild.
+Will be included in final tarball splat. Ref for failed cherry-pick at:
+ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/0620efe7.failed
+
+IS_FNIC_FCP_INITIATOR macro is not applicable at this time. Delete the macro.
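As context for the radix__vmemmap_populate hunk above, here is a minimal, self-contained sketch of the alignment rule that hunk adds: round the vmemmap start down to a page boundary, then attempt a PMD (huge) mapping only when the address is PMD_SIZE aligned. This is an editor's illustration, not patch content; the macros are local stand-ins for the kernel's power-of-two ALIGN_DOWN/IS_ALIGNED, and the 64K page / 2M PMD sizes and the sample address are assumed values for demonstration.

#include <stdio.h>

/* Stand-ins for the kernel macros; power-of-two sizes assumed. */
#define PAGE_SIZE		(64UL * 1024)		/* illustrative 64K radix page */
#define PMD_SIZE		(2UL * 1024 * 1024)	/* illustrative 2M PMD */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1UL))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1UL)) == 0)

int main(void)
{
	unsigned long start = 0xc00c000000012345UL;	/* arbitrary unaligned vmemmap addr */

	/* Mirrors the hunk: round the start down to a page boundary first. */
	start = ALIGN_DOWN(start, PAGE_SIZE);
	printf("page-aligned start: 0x%lx\n", start);

	/*
	 * Mirrors the reworked condition: a PMD mapping is attempted only
	 * when the address itself is PMD_SIZE aligned; otherwise fall back
	 * to PTE-level mappings (and, when an altmap is present, also when
	 * the allocation would cross the altmap boundary).
	 */
	if (IS_ALIGNED(start, PMD_SIZE))
		printf("try PMD mapping\n");
	else
		printf("fall back to PTE mappings\n");
	return 0;
}

With the values above, 0x...12345 rounds down to 0x...10000, which is not 2M-aligned, so the PTE path is taken, matching the patch's conservative preference for smaller mappings when the namespace start is unaligned.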
+ + Suggested-by: Dan Carpenter + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Signed-off-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250110091655.17643-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 0620efe789a73586b5b3ed38b27d1b69b2150958) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,6c5f6046b1f5..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -88,16 -80,100 +88,90 @@@ + #define FNIC_DEV_RST_TERM_DONE BIT(20) + #define FNIC_DEV_RST_ABTS_PENDING BIT(21) + +++<<<<<<< HEAD +++======= ++ #define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */ ++ #define FNIC_FCOE_MAX_CMD_LEN 16 ++ /* Retry supported by rport (returned by PRLI service parameters) */ ++ #define FNIC_FC_RP_FLAGS_RETRY 0x1 ++ ++ /* Cisco vendor id */ ++ #define PCI_VENDOR_ID_CISCO 0x1137 ++ #define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */ ++ ++ /* sereno pcie switch */ ++ #define PCI_DEVICE_ID_CISCO_SERENO 0x004e ++ #define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */ ++ #define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */ ++ #define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */ ++ ++ /* Sereno */ ++ #define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */ ++ #define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */ ++ #define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */ ++ ++ /* Cruz */ ++ #define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */ ++ /* Cruz MountTian SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b ++ #define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */ ++ /* Cruz MountTian2 SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157 ++ #define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */ ++ ++ /* Bodega */ ++ /* VIC 1457 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218 ++ #define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */ ++ /* VIC 1487 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a ++ #define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */ ++ /* VIC 1440 Mezz mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215 ++ #define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */ ++ ++ /* Beverly */ ++ #define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */ ++ #define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */ ++ #define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */ ++ #define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */ 
++ #define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */ ++ #define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */ ++ #define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */ ++ #define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */ ++ #define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */ ++ #define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */ ++ ++ struct fnic_pcie_device { ++ u32 device; ++ u8 *desc; ++ u32 subsystem_device; ++ u8 *subsys_desc; ++ }; ++ +++>>>>>>> 0620efe789a7 (scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro) + /* + - * fnic private data per SCSI command. + + * Usage of the scsi_cmnd scratchpad. + * These fields are locked by the hashed io_req_lock. + */ + -struct fnic_cmd_priv { + - struct fnic_io_req *io_req; + - enum fnic_ioreq_state state; + - u32 flags; + - u16 abts_status; + - u16 lr_status; + -}; + - + -static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) + -{ + - return scsi_cmd_priv(cmd); + -} + - + -static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) + -{ + - struct fnic_cmd_priv *fcmd = fnic_priv(cmd); + - + - return ((u64)fcmd->flags << 32) | fcmd->state; + -} + +#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) + +#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) + +#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) + +#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) + +#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) + +#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) + + #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,3a900d540f21..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -854,58 -959,89 +854,84 @@@ static int fnic_probe(struct pci_dev *p + } + } + + - init_completion(&fnic->reset_completion_wait); + + /* + + * Initialization done with PCI system, hardware, firmware. 
+ + * Add host to SCSI + + */ + + err = scsi_add_host(lp->host, &pdev->dev); + + if (err) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "fnic: scsi_add_host failed...exiting\n"); + + goto err_out_free_rq_buf; + + } + + - /* Start local port initialization */ + - iport->max_flogi_retries = fnic->config.flogi_retries; + - iport->max_plogi_retries = fnic->config.plogi_retries; + - iport->plogi_timeout = fnic->config.plogi_timeout; + - iport->service_params = + - (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS | + - FNIC_FCP_SP_CONF_CMPL); + - if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + - iport->service_params |= FNIC_FCP_SP_RETRY; + + /* Start local port initiatialization */ + + - iport->boot_time = jiffies; + - iport->e_d_tov = fnic->config.ed_tov; + - iport->r_a_tov = fnic->config.ra_tov; + - iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT; + - iport->wwpn = fnic->config.port_wwn; + - iport->wwnn = fnic->config.node_wwn; + + lp->link_up = 0; + + - iport->max_payload_size = fnic->config.maxdatafieldsize; + + lp->max_retry_count = fnic->config.flogi_retries; + + lp->max_rport_retry_count = fnic->config.plogi_retries; + + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + + FCP_SPPF_CONF_COMPL); + + if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + + lp->service_params |= FCP_SPPF_RETRY; + + - if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) || + - (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) || + - ((iport->max_payload_size % 4) != 0)) { + - iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN; + - } + + lp->boot_time = jiffies; + + lp->e_d_tov = fnic->config.ed_tov; + + lp->r_a_tov = fnic->config.ra_tov; + + lp->link_supported_speeds = FC_PORTSPEED_10GBIT; + + fc_set_wwnn(lp, fnic->config.node_wwn); + + fc_set_wwpn(lp, fnic->config.port_wwn); + + - iport->flags |= FNIC_FIRST_LINK_UP; + + fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); + + - timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback, + - 0); + + if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, + + FCPIO_HOST_EXCH_RANGE_END, NULL)) { + + err = -ENOMEM; + + goto err_out_remove_scsi_host; + + } + + + fc_lport_init_stats(lp); + fnic->stats_reset_time = jiffies; + + - INIT_WORK(&fnic->link_work, fnic_handle_link); + - INIT_WORK(&fnic->frame_work, fnic_handle_frame); + - INIT_WORK(&fnic->tport_work, fnic_tport_event_handler); + - INIT_WORK(&fnic->flush_work, fnic_flush_tx); + - + - INIT_LIST_HEAD(&fnic->frame_queue); + - INIT_LIST_HEAD(&fnic->tx_queue); + - INIT_LIST_HEAD(&fnic->tport_event_list); + + fc_lport_config(lp); + + - INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry, + - fdls_schedule_oxid_free_retry_work); + - + - /* Initialize the oxid reclaim list and work struct */ + - INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list); + - INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler); + - + - /* Enable all queues */ + - for (i = 0; i < fnic->raw_wq_count; i++) + - vnic_wq_enable(&fnic->wq[i]); + - for (i = 0; i < fnic->rq_count; i++) { + - if (!ioread32(&fnic->rq[i].ctrl->enable)) + - vnic_rq_enable(&fnic->rq[i]); + + if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + + + sizeof(struct fc_frame_header))) { + + err = -EINVAL; + + goto err_out_free_exch_mgr; + } + - for (i = 0; i < fnic->wq_copy_count; i++) + - vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); + + fc_host_maxframe_size(lp->host) = lp->mfs; + + fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; + +++<<<<<<< HEAD + + 
sprintf(fc_host_symbolic_name(lp->host), + + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); +++======= ++ vnic_dev_enable(fnic->vdev); ++ ++ err = fnic_request_intr(fnic); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "Unable to request irq.\n"); ++ goto err_out_fnic_request_intr; ++ } ++ ++ fnic_notify_timer_start(fnic); ++ ++ fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE)); ++ ++ if (fnic_scsi_drv_init(fnic)) ++ goto err_out_scsi_drv_init; ++ ++ err = fnic_stats_debugfs_init(fnic); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n"); ++ goto err_out_free_stats_debugfs; ++ } ++ ++ for (i = 0; i < fnic->intr_count; i++) ++ vnic_intr_unmask(&fnic->intr[i]); +++>>>>>>> 0620efe789a7 (scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro) + + spin_lock_irqsave(&fnic_list_lock, flags); + list_add_tail(&fnic->list, &fnic_list); +@@@ -954,29 -1070,34 +980,39 @@@ err_out_free_max_pool + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); + err_out_free_dflt_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); + +err_out_free_ioreq_pool: + + mempool_destroy(fnic->io_req_pool); + err_out_free_resources: + fnic_free_vnic_resources(fnic); + -err_out_fnic_alloc_vnic_res: + +err_out_clear_intr: + fnic_clear_intr_mode(fnic); +++<<<<<<< HEAD + +err_out_dev_close: +++======= ++ err_out_fnic_set_intr_mode: ++ scsi_host_put(fnic->host); ++ err_out_fnic_role: ++ err_out_scsi_host_alloc: ++ err_out_fnic_get_config: ++ err_out_dev_mac_addr: ++ err_out_dev_init: +++>>>>>>> 0620efe789a7 (scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro) + vnic_dev_close(fnic->vdev); + -err_out_dev_open: + -err_out_dev_cmd_init: + +err_out_dev_cmd_deinit: + +err_out_vnic_unregister: + vnic_dev_unregister(fnic->vdev); + -err_out_dev_register: + +err_out_iounmap: + fnic_iounmap(fnic); + -err_out_fnic_map_bar: + -err_out_map_bar: + -err_out_set_dma_mask: + +err_out_release_regions: + pci_release_regions(pdev); + -err_out_pci_request_regions: + +err_out_disable_device: + pci_disable_device(pdev); + -err_out_pci_enable_device: + +err_out_free_hba: + + fnic_stats_debugfs_remove(fnic); + ida_free(&fnic_ida, fnic->fnic_num); + err_out_ida_alloc: + - kfree(fnic); + -err_out_fnic_alloc: + + scsi_host_put(lp->host); + +err_out: + return err; + } + +@@@ -1004,29 -1123,25 +1040,37 @@@ static void fnic_remove(struct pci_dev + * be no event queued for this fnic device in the workqueue + */ + flush_workqueue(fnic_event_queue); +++<<<<<<< HEAD + + skb_queue_purge(&fnic->frame_queue); + + skb_queue_purge(&fnic->tx_queue); +++======= ++ ++ fnic_scsi_unload(fnic); ++ ++ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) ++ del_timer_sync(&fnic->notify_timer); +++>>>>>>> 0620efe789a7 (scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro) + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + - del_timer_sync(&fnic->retry_fip_timer); + - del_timer_sync(&fnic->fcs_ka_timer); + - del_timer_sync(&fnic->enode_ka_timer); + - del_timer_sync(&fnic->vn_ka_timer); + - + - fnic_free_txq(&fnic->fip_frame_queue); + + del_timer_sync(&fnic->fip_timer); + + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + + fnic_fcoe_evlist_free(fnic); + } + + - if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) + - del_timer_sync(&fnic->iport.fabric.fdmi_timer); + + /* + + * Log off the fabric. This stops all remote ports, dns port, + + * logs off the fabric. 
This flushes all rport, disc, lport work + + * before returning + + */ + + fc_fabric_logoff(fnic->lport); + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + fnic->in_remove = 1; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + fcoe_ctlr_destroy(&fnic->ctlr); + + fc_lport_destroy(lp); + fnic_stats_debugfs_remove(fnic); + + /* +@@@ -1055,8 -1167,11 +1099,14 @@@ + fnic_iounmap(fnic); + pci_release_regions(pdev); + pci_disable_device(pdev); + - pci_set_drvdata(pdev, NULL); + ida_free(&fnic_ida, fnic->fnic_num); +++<<<<<<< HEAD + + scsi_host_put(lp->host); +++======= ++ fnic_scsi_unload_cleanup(fnic); ++ scsi_host_put(fnic->host); ++ kfree(fnic); +++>>>>>>> 0620efe789a7 (scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro) + } + + static struct pci_driver fnic_driver = { +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fnic.h +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/098585aa.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/098585aa.failed new file mode 100644 index 0000000000000..0fbed1873315b --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/098585aa.failed @@ -0,0 +1,2427 @@ +scsi: fnic: Add and integrate support for FIP + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 098585aa8acab3fcd46ce908af84ef168f5ccab6 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/098585aa.failed + +Add and integrate support for FCoE Initialization (protocol) FIP. This +protocol will be exercised on Cisco UCS rack servers. + +Add support to specifically print FIP related debug messages. + +Replace existing definitions to handle new data structures. + +Clean up old and obsolete definitions. + + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202409291955.FcMZfNSt-lkp@intel.com/ +Closes: https://lore.kernel.org/oe-kbuild-all/202412081904.pXwdx15J-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela +Co-developed-by: Gian Carlo Boffa + Signed-off-by: Gian Carlo Boffa +Co-developed-by: Arulprabhu Ponnusamy + Signed-off-by: Arulprabhu Ponnusamy +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-9-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 098585aa8acab3fcd46ce908af84ef168f5ccab6) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_fcs.c +# drivers/scsi/fnic/fnic_fip.h +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,64606fac14ea..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -143,17 -223,29 +143,27 @@@ do { + } while (0); \ + } while (0) + + -#define FNIC_MAIN_DBG(kern_level, host, fnic_num, fmt, args...) \ + +#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \ + - shost_printk(kern_level, host, \ + - "fnic<%d>: %s: %d: " fmt, fnic_num,\ + - __func__, __LINE__, ##args);) + + shost_printk(kern_level, host, fmt, ##args);) + + -#define FNIC_FCS_DBG(kern_level, host, fnic_num, fmt, args...) \ + +#define FNIC_FCS_DBG(kern_level, host, fmt, args...) 
\ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ + - shost_printk(kern_level, host, \ + - "fnic<%d>: %s: %d: " fmt, fnic_num,\ + - __func__, __LINE__, ##args);) + + shost_printk(kern_level, host, fmt, ##args);) + +++<<<<<<< HEAD + +#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \ +++======= ++ #define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \ ++ FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ ++ shost_printk(kern_level, host, \ ++ "fnic<%d>: %s: %d: " fmt, fnic_num,\ ++ __func__, __LINE__, ##args);) ++ ++ #define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \ +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ + - shost_printk(kern_level, host, \ + - "fnic<%d>: %s: %d: " fmt, fnic_num,\ + - __func__, __LINE__, ##args);) + + shost_printk(kern_level, host, fmt, ##args);) + + #define FNIC_ISR_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ +@@@ -388,8 -508,7 +397,12 @@@ int fnic_is_abts_pending(struct fnic * + void fnic_handle_fip_frame(struct work_struct *work); + void fnic_handle_fip_event(struct fnic *fnic); + void fnic_fcoe_reset_vlans(struct fnic *fnic); +++<<<<<<< HEAD + +void fnic_fcoe_evlist_free(struct fnic *fnic); + +extern void fnic_handle_fip_timer(struct fnic *fnic); +++======= ++ extern void fnic_handle_fip_timer(struct timer_list *t); +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + + static inline int + fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) +diff --cc drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3,1db689e98d88..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -26,25 -14,99 +26,111 @@@ + #include + #include + #include +- #include + #include + #include + -#include + #include "fnic_io.h" + #include "fnic.h" +++<<<<<<< HEAD + +#include "fnic_fip.h" +++======= ++ #include "fnic_fdls.h" ++ #include "fdls_fc.h" +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + #include "cq_enet_desc.h" + #include "cq_exch_desc.h" ++ #include "fip.h" + +- static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; +- struct workqueue_struct *fnic_fip_queue; ++ extern struct workqueue_struct *fnic_fip_queue; + struct workqueue_struct *fnic_event_queue; + +++<<<<<<< HEAD + +static void fnic_set_eth_mode(struct fnic *); + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); +++======= ++ static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; ++ ++ /* ++ * Internal Functions ++ * This function will initialize the src_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, ++ uint8_t *src_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ src_mac[0], src_mac[1], src_mac[2], src_mac[3], ++ src_mac[4], src_mac[5]); ++ ++ memcpy(fnic->iport.fpma, src_mac, 6); ++ } ++ ++ /* ++ * This function will initialize the dst_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic, ++ uint8_t *dst_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Setting dst mac: 
%02x:%02x:%02x:%02x:%02x:%02x", ++ dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], ++ dst_mac[4], dst_mac[5]); ++ ++ memcpy(fnic->iport.fcfmac, dst_mac, 6); ++ } ++ ++ /* ++ * FPMA can be either taken from ethhdr(dst_mac) or flogi resp ++ * or derive from FC_MAP and FCID combination. While it should be ++ * same, revisit this if there is any possibility of not-correct. ++ */ ++ void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, ++ uint8_t *fcid) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct ethhdr *ethhdr = (struct ethhdr *) rx_frame; ++ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 }; ++ ++ memcpy(&fcmac[3], fcid, 3); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ ethhdr->h_dest[0], ethhdr->h_dest[1], ++ ethhdr->h_dest[2], ethhdr->h_dest[3], ++ ethhdr->h_dest[4], ethhdr->h_dest[5]); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], ++ fcmac[5]); ++ ++ fnic_fdls_set_fcoe_srcmac(fnic, fcmac); ++ fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source); ++ } ++ ++ void fnic_fdls_init(struct fnic *fnic, int usefip) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ /* Initialize iPort structure */ ++ iport->state = FNIC_IPORT_STATE_INIT; ++ iport->fnic = fnic; ++ iport->usefip = usefip; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x", ++ iport->hwmac[0], iport->hwmac[1], iport->hwmac[2], ++ iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]); ++ ++ INIT_LIST_HEAD(&iport->tport_list); ++ INIT_LIST_HEAD(&iport->tport_list_pending_del); ++ } +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + + void fnic_handle_link(struct work_struct *work) + { +@@@ -178,11 -246,6 +264,14 @@@ + fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: UP_DOWN", + strlen("Link Status: UP_DOWN")); +++<<<<<<< HEAD + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "deleting fip-timer during link-down\n"); + + del_timer_sync(&fnic->fip_timer); + + } +++======= +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + fcoe_ctlr_link_down(&fnic->ctlr); + } + +@@@ -225,399 -288,25 +314,406 @@@ void fnic_handle_frame(struct work_stru + } + } + +++<<<<<<< HEAD + +void fnic_fcoe_evlist_free(struct fnic *fnic) + +{ + + struct fnic_event *fevt = NULL; + + struct fnic_event *next = NULL; + + unsigned long flags; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (list_empty(&fnic->evlist)) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + + list_del(&fevt->list); + + kfree(fevt); + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +} + + + +void fnic_handle_event(struct work_struct *work) + +{ + + struct fnic *fnic = container_of(work, struct fnic, event_work); + + struct fnic_event *fevt = NULL; + + struct fnic_event *next = NULL; + + unsigned long flags; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (list_empty(&fnic->evlist)) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + + if (fnic->stop_rx_link_events) { + + list_del(&fevt->list); + + kfree(fevt); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + 
/* + + * If we're in a transitional state, just re-queue and return. + + * The queue will be serviced when we get to a stable state. + + */ + + if (fnic->state != FNIC_IN_FC_MODE && + + fnic->state != FNIC_IN_ETH_MODE) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + list_del(&fevt->list); + + switch (fevt->event) { + + case FNIC_EVT_START_VLAN_DISC: + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fcoe_send_vlan_req(fnic); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + break; + + case FNIC_EVT_START_FCF_DISC: + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "Start FCF Discovery\n"); + + fnic_fcoe_start_fcf_disc(fnic); + + break; + + default: + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "Unknown event 0x%x\n", fevt->event); + + break; + + } + + kfree(fevt); + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +} + + + +/** + + * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected + + * @fip: The FCoE controller that received the frame + + * @skb: The received FIP frame + + * + + * Returns non-zero if the frame is rejected with unsupported cmd with + + * insufficient resource els explanation. + + */ + +static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, + + struct sk_buff *skb) + +{ + + struct fc_lport *lport = fip->lp; + + struct fip_header *fiph; + + struct fc_frame_header *fh = NULL; + + struct fip_desc *desc; + + struct fip_encaps *els; + + u16 op; + + u8 els_op; + + u8 sub; + + + + size_t rlen; + + size_t dlen = 0; + + + + if (skb_linearize(skb)) + + return 0; + + + + if (skb->len < sizeof(*fiph)) + + return 0; + + + + fiph = (struct fip_header *)skb->data; + + op = ntohs(fiph->fip_op); + + sub = fiph->fip_subcode; + + + + if (op != FIP_OP_LS) + + return 0; + + + + if (sub != FIP_SC_REP) + + return 0; + + + + rlen = ntohs(fiph->fip_dl_len) * 4; + + if (rlen + sizeof(*fiph) > skb->len) + + return 0; + + + + desc = (struct fip_desc *)(fiph + 1); + + dlen = desc->fip_dlen * FIP_BPW; + + + + if (desc->fip_dtype == FIP_DT_FLOGI) { + + + + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + + return 0; + + + + els = (struct fip_encaps *)desc; + + fh = (struct fc_frame_header *)(els + 1); + + + + if (!fh) + + return 0; + + + + /* + + * ELS command code, reason and explanation should be = Reject, + + * unsupported command and insufficient resource + + */ + + els_op = *(u8 *)(fh + 1); + + if (els_op == ELS_LS_RJT) { + + shost_printk(KERN_INFO, lport->host, + + "Flogi Request Rejected by Switch\n"); + + return 1; + + } + + shost_printk(KERN_INFO, lport->host, + + "Flogi Request Accepted by Switch\n"); + + } + + return 0; + +} + + + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic) + +{ + + struct fcoe_ctlr *fip = &fnic->ctlr; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + struct sk_buff *skb; + + char *eth_fr; + + struct fip_vlan *vlan; + + u64 vlan_tov; + + + + fnic_fcoe_reset_vlans(fnic); + + fnic->set_vlan(fnic, 0); + + + + if (printk_ratelimit()) + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "Sending VLAN request...\n"); + + + + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + + if (!skb) + + return; + + + + eth_fr = (char *)skb->data; + + vlan = (struct fip_vlan *)eth_fr; + + + + memset(vlan, 0, sizeof(*vlan)); + + memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + + memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); + + vlan->eth.h_proto = htons(ETH_P_FIP); + + + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + + vlan->fip.fip_op = htons(FIP_OP_VLAN); + + 
vlan->fip.fip_subcode = FIP_SC_VL_REQ; + + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + + vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + + memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + + put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); + + + + skb_put(skb, sizeof(*vlan)); + + skb->protocol = htons(ETH_P_FIP); + + skb_reset_mac_header(skb); + + skb_reset_network_header(skb); + + fip->send(fip, skb); + + + + /* set a timer so that we can retry if there no response */ + + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + + mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); + +} + + + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) + +{ + + struct fcoe_ctlr *fip = &fnic->ctlr; + + struct fip_header *fiph; + + struct fip_desc *desc; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + u16 vid; + + size_t rlen; + + size_t dlen; + + struct fcoe_vlan *vlan; + + u64 sol_time; + + unsigned long flags; + + + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "Received VLAN response...\n"); + + + + fiph = (struct fip_header *) skb->data; + + + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", + + ntohs(fiph->fip_op), fiph->fip_subcode); + + + + rlen = ntohs(fiph->fip_dl_len) * 4; + + fnic_fcoe_reset_vlans(fnic); + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + desc = (struct fip_desc *)(fiph + 1); + + while (rlen > 0) { + + dlen = desc->fip_dlen * FIP_BPW; + + switch (desc->fip_dtype) { + + case FIP_DT_VLAN: + + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + + shost_printk(KERN_INFO, fnic->lport->host, + + "process_vlan_resp: FIP VLAN %d\n", vid); + + vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); + + if (!vlan) { + + /* retry from timer */ + + spin_unlock_irqrestore(&fnic->vlans_lock, + + flags); + + goto out; + + } + + vlan->vid = vid & 0x0fff; + + vlan->state = FIP_VLAN_AVAIL; + + list_add_tail(&vlan->list, &fnic->vlans); + + break; + + } + + desc = (struct fip_desc *)((char *)desc + dlen); + + rlen -= dlen; + + } + + + + /* any VLAN descriptors present ? 
*/ + + if (list_empty(&fnic->vlans)) { + + /* retry from timer */ + + atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "No VLAN descriptors in FIP VLAN response\n"); + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + goto out; + + } + + + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + + fnic->set_vlan(fnic, vlan->vid); + + vlan->state = FIP_VLAN_SENT; /* sent now */ + + vlan->sol_count++; + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + + + /* start the solicitation */ + + fcoe_ctlr_link_up(fip); + + + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + +out: + + return; + +} + + + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) + +{ + + unsigned long flags; + + struct fcoe_vlan *vlan; + + u64 sol_time; + + + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + + fnic->set_vlan(fnic, vlan->vid); + + vlan->state = FIP_VLAN_SENT; /* sent now */ + + vlan->sol_count = 1; + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + + + /* start the solicitation */ + + fcoe_ctlr_link_up(&fnic->ctlr); + + + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + +} + + + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) + +{ + + unsigned long flags; + + struct fcoe_vlan *fvlan; + + + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (list_empty(&fnic->vlans)) { + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + return -EINVAL; + + } + + + + fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + + if (fvlan->state == FIP_VLAN_USED) { + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + return 0; + + } + + + + if (fvlan->state == FIP_VLAN_SENT) { + + fvlan->state = FIP_VLAN_USED; + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + return 0; + + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + return -EINVAL; + +} + + + +static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) + +{ + + struct fnic_event *fevt; + + unsigned long flags; + + + + fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); + + if (!fevt) + + return; + + + + fevt->fnic = fnic; + + fevt->event = ev; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + list_add_tail(&fevt->list, &fnic->evlist); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + schedule_work(&fnic->event_work); + +} + + + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) + +{ + + struct fip_header *fiph; + + int ret = 1; + + u16 op; + + u8 sub; + + + + if (!skb || !(skb->data)) + + return -1; + + + + if (skb_linearize(skb)) + + goto drop; + + + + fiph = (struct fip_header *)skb->data; + + op = ntohs(fiph->fip_op); + + sub = fiph->fip_subcode; + + + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + + goto drop; + + + + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + + goto drop; + + + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { + + if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) + + goto drop; + + /* pass it on to fcoe */ + + ret = 1; + + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { + + /* set the vlan as used */ + + fnic_fcoe_process_vlan_resp(fnic, skb); + + ret = 0; + + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + + /* received CVL request, restart vlan disc */ + + fnic_event_enq(fnic, 
FNIC_EVT_START_VLAN_DISC); + + /* pass it on to fcoe */ + + ret = 1; + + } + +drop: + + return ret; + +} + + +++======= +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + void fnic_handle_fip_frame(struct work_struct *work) + { ++ struct fnic_frame_list *cur_frame, *next; + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); +- struct fnic_stats *fnic_stats = &fnic->fnic_stats; +- unsigned long flags; +- struct sk_buff *skb; +- struct ethhdr *eh; + +- while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { +- spin_lock_irqsave(&fnic->fnic_lock, flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Processing FIP frame\n"); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue, ++ links) { + if (fnic->stop_rx_link_events) { +- spin_unlock_irqrestore(&fnic->fnic_lock, flags); +- dev_kfree_skb(skb); ++ list_del(&cur_frame->links); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ kfree(cur_frame->fp); ++ kfree(cur_frame); + return; + } ++ + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. +@@@ -733,98 -371,18 +778,109 @@@ void fnic_update_mac_locked(struct fni + new = ctl; + if (ether_addr_equal(data, new)) + return; +++<<<<<<< HEAD + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); +++======= ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Update MAC: %u\n", *new); ++ +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) + vnic_dev_del_addr(fnic->vdev, data); ++ + memcpy(data, new, ETH_ALEN); + if (!ether_addr_equal(new, ctl)) + vnic_dev_add_addr(fnic->vdev, new); + } + +++<<<<<<< HEAD + +/** + + * fnic_update_mac() - set data MAC address and filters. + + * @lport: local port. + + * @new: newly-assigned FCoE MAC address. + + */ + +void fnic_update_mac(struct fc_lport *lport, u8 *new) + +{ + + struct fnic *fnic = lport_priv(lport); + + + + spin_lock_irq(&fnic->fnic_lock); + + fnic_update_mac_locked(fnic, new); + + spin_unlock_irq(&fnic->fnic_lock); + +} + + + +/** + + * fnic_set_port_id() - set the port_ID after successful FLOGI. + + * @lport: local port. + + * @port_id: assigned FC_ID. + + * @fp: received frame containing the FLOGI accept or NULL. + + * + + * This is called from libfc when a new FC_ID has been assigned. + + * This causes us to reset the firmware to FC_MODE and setup the new MAC + + * address and FC_ID. + + * + + * It is also called with FC_ID 0 when we're logged off. + + * + + * If the FC_ID is due to point-to-point, fp may be NULL. + + */ + +void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) + +{ + + struct fnic *fnic = lport_priv(lport); + + u8 *mac; + + int ret; + + + + FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n", + + port_id, fp); + + + + /* + + * If we're clearing the FC_ID, change to use the ctl_src_addr. + + * Set ethernet mode to send FLOGI. 
+ + */ + + if (!port_id) { + + fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); + + fnic_set_eth_mode(fnic); + + return; + + } + + + + if (fp) { + + mac = fr_cb(fp)->granted_mac; + + if (is_zero_ether_addr(mac)) { + + /* non-FIP - FLOGI already accepted - ignore return */ + + fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); + + } + + fnic_update_mac(lport, mac); + + } + + + + /* Change state to reflect transition to FC mode */ + + spin_lock_irq(&fnic->fnic_lock); + + if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) + + fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; + + else { + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "Unexpected fnic state %s while" + + " processing flogi resp\n", + + fnic_state_to_str(fnic->state)); + + spin_unlock_irq(&fnic->fnic_lock); + + return; + + } + + spin_unlock_irq(&fnic->fnic_lock); + + + + /* + + * Send FLOGI registration to firmware to set up FC mode. + + * The new address will be set up when registration completes. + + */ + + ret = fnic_flogi_reg_handler(fnic, port_id); + + + + if (ret < 0) { + + spin_lock_irq(&fnic->fnic_lock); + + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) + + fnic->state = FNIC_IN_ETH_MODE; + + spin_unlock_irq(&fnic->fnic_lock); + + } + +} + + +++======= +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc + *cq_desc, struct vnic_rq_buf *buf, + int skipped __attribute__((unused)), +@@@ -856,54 -418,58 +916,72 @@@ + + cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); + if (type == CQ_DESC_TYPE_RQ_FCP) { +- cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, +- &type, &color, &q_number, &completed_index, +- &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, +- &tmpl, &fcp_bytes_written, &sof, &eof, +- &ingress_port, &packet_error, +- &fcoe_enc_error, &fcs_ok, &vlan_stripped, +- &vlan); +- skb_trim(skb, fcp_bytes_written); +- fr_sof(fp) = sof; +- fr_eof(fp) = eof; +- ++ cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type, ++ &color, &q_number, &completed_index, &eop, &sop, ++ &fcoe_fnic_crc_ok, &exchange_id, &tmpl, ++ &fcp_bytes_written, &sof, &eof, &ingress_port, ++ &packet_error, &fcoe_enc_error, &fcs_ok, ++ &vlan_stripped, &vlan); ++ ethhdr_stripped = 1; ++ bytes_written = fcp_bytes_written; + } else if (type == CQ_DESC_TYPE_RQ_ENET) { +- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, +- &type, &color, &q_number, &completed_index, +- &ingress_port, &fcoe, &eop, &sop, +- &rss_type, &csum_not_calc, &rss_hash, +- &bytes_written, &packet_error, +- &vlan_stripped, &vlan, &checksum, +- &fcoe_sof, &fcoe_fc_crc_ok, +- &fcoe_enc_error, &fcoe_eof, +- &tcp_udp_csum_ok, &udp, &tcp, +- &ipv4_csum_ok, &ipv6, &ipv4, +- &ipv4_fragment, &fcs_ok); +- skb_trim(skb, bytes_written); ++ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type, ++ &color, &q_number, &completed_index, ++ &ingress_port, &fcoe, &eop, &sop, &rss_type, ++ &csum_not_calc, &rss_hash, &enet_bytes_written, ++ &packet_error, &vlan_stripped, &vlan, ++ &checksum, &fcoe_sof, &fcoe_fnic_crc_ok, ++ &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, ++ &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, ++ &ipv4_fragment, &fcs_ok); ++ ++ ethhdr_stripped = 0; ++ bytes_written = enet_bytes_written; ++ + if (!fcs_ok) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); +++<<<<<<< HEAD + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fcs error. 
dropping packet.\n"); +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p fcs error. Dropping packet.\n", fnic); +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + goto drop; + } +- if (fnic_import_rq_eth_pkt(fnic, skb)) +- return; ++ eh = (struct ethhdr *) fp; ++ if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) { + ++ if (fnic_import_rq_eth_pkt(fnic, fp)) ++ return; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Dropping h_proto 0x%x", ++ be16_to_cpu(eh->h_proto)); ++ goto drop; ++ } + } else { +- /* wrong CQ type*/ +- shost_printk(KERN_ERR, fnic->lport->host, +- "fnic rq_cmpl wrong cq type x%x\n", type); ++ /* wrong CQ type */ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic rq_cmpl wrong cq type x%x\n", type); + goto drop; + } + +- if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { ++ if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); +++<<<<<<< HEAD + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic rq_cmpl fcoe x%x fcsok x%x" + + " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" + + " x%x\n", + + fcoe, fcs_ok, packet_error, + + fcoe_fc_crc_ok, fcoe_enc_error); +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n", ++ fcoe, fcs_ok, packet_error, ++ fcoe_fnic_crc_ok, fcoe_enc_error); +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + goto drop; + } + +@@@ -1174,15 -666,65 +1206,56 @@@ int fnic_send(struct fc_lport *lp, stru + * Queue frame if in a transitional state. + * This occurs while registering the Port_ID / MAC address after FLOGI. + */ + - if ((fnic->state != FNIC_IN_FC_MODE) + - && (fnic->state != FNIC_IN_ETH_MODE)) { + - frame_elem = mempool_alloc(fnic->frame_elem_pool, + - GFP_ATOMIC | __GFP_ZERO); + - if (!frame_elem) { + - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + - "Failed to allocate memory for frame elem"); + - return -ENOMEM; + - } + - + - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + - "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n", + - ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id), + - fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); + - + - frame_elem->fp = frame; + - frame_elem->frame_len = len; + - list_add_tail(&frame_elem->links, &fnic->tx_queue); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { + + skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return 0; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +++<<<<<<< HEAD + + return fnic_send_frame(fnic, fp); +++======= ++ fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing"); ++ ++ ret = fnic_send_frame(fnic, frame, len); ++ return ret; ++ } ++ ++ void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame, ++ int frame_size) ++ { ++ struct fnic *fnic = iport->fnic; ++ uint8_t *dstmac, *srcmac; ++ ++ /* If module unload is in-progress, don't send */ ++ if (fnic->in_remove) ++ return; ++ ++ if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) { ++ srcmac = iport->fpma; ++ dstmac = iport->fcfmac; ++ } else { ++ srcmac = iport->hwmac; ++ dstmac = FCOE_ALL_FCF_MAC; ++ } ++ ++ fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac); ++ } ++ ++ int ++ fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame, ++ int frame_size) ++ { ++ struct 
fnic *fnic = iport->fnic; ++ ++ if (fnic->in_remove) ++ return -1; ++ ++ fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing"); ++ return fnic_send_frame(fnic, frame, frame_size); +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + } + + /** +@@@ -1207,42 -754,62 +1280,101 @@@ void fnic_flush_tx(struct work_struct * + } + } + +++<<<<<<< HEAD + +/** + + * fnic_set_eth_mode() - put fnic into ethernet mode. + + * @fnic: fnic device + + * + + * Called without fnic lock held. + + */ + +static void fnic_set_eth_mode(struct fnic *fnic) + +{ + + unsigned long flags; + + enum fnic_state old_state; + + int ret; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + +again: + + old_state = fnic->state; + + switch (old_state) { + + case FNIC_IN_FC_MODE: + + case FNIC_IN_ETH_TRANS_FC_MODE: + + default: + + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + ret = fnic_fw_reset_handler(fnic); + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) + + goto again; + + if (ret) + + fnic->state = old_state; + + break; + + + + case FNIC_IN_FC_TRANS_ETH_MODE: + + case FNIC_IN_ETH_MODE: + + break; + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++======= ++ int ++ fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id, ++ void *fp) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct ethhdr *ethhdr; ++ int ret; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id, ++ fp, fnic->state); ++ ++ if (fp) { ++ ethhdr = (struct ethhdr *) fp; ++ vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest); ++ } ++ ++ /* Change state to reflect transition to FC mode */ ++ if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) ++ fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; ++ else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Unexpected fnic state while processing FLOGI response\n"); ++ return -1; ++ } ++ ++ /* ++ * Send FLOGI registration to firmware to set up FC mode. ++ * The new address will be set up when registration completes. 
++ */ ++ ret = fnic_flogi_reg_handler(fnic, port_id); ++ if (ret < 0) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI registration error ret: %d fnic state: %d\n", ++ ret, fnic->state); ++ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) ++ fnic->state = FNIC_IN_ETH_MODE; ++ ++ return -1; ++ } ++ iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI registration success\n"); ++ return 0; ++ } ++ ++ void fnic_free_txq(struct list_head *head) ++ { ++ struct fnic_frame_list *cur_frame, *next; ++ ++ list_for_each_entry_safe(cur_frame, next, head, links) { ++ list_del(&cur_frame->links); ++ kfree(cur_frame->fp); ++ kfree(cur_frame); ++ } +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + } + + static void fnic_wq_complete_frame_send(struct vnic_wq *wq, +@@@ -1303,109 -870,3 +1435,112 @@@ void fnic_free_wq_buf(struct vnic_wq *w + buf->os_buf = NULL; + } + +++<<<<<<< HEAD + +void fnic_fcoe_reset_vlans(struct fnic *fnic) + +{ + + unsigned long flags; + + struct fcoe_vlan *vlan; + + struct fcoe_vlan *next; + + + + /* + + * indicate a link down to fcoe so that all fcf's are free'd + + * might not be required since we did this before sending vlan + + * discovery request + + */ + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (!list_empty(&fnic->vlans)) { + + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + + list_del(&vlan->list); + + kfree(vlan); + + } + + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + +} + + + +void fnic_handle_fip_timer(struct fnic *fnic) + +{ + + unsigned long flags; + + struct fcoe_vlan *vlan; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + u64 sol_time; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->stop_rx_link_events) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + if (fnic->ctlr.mode == FIP_MODE_NON_FIP) + + return; + + + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (list_empty(&fnic->vlans)) { + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* no vlans available, try again */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fip_timer: vlan %d state %d sol_count %d\n", + + vlan->vid, vlan->state, vlan->sol_count); + + switch (vlan->state) { + + case FIP_VLAN_USED: + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "FIP VLAN is selected for FC transaction\n"); + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + break; + + case FIP_VLAN_FAILED: + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* if all vlans are in failed state, restart vlan disc */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + break; + + case FIP_VLAN_SENT: + + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + + /* + + * no response on this vlan, remove from the list. 
+ + * Try the next vlan + + */ + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "Dequeue this VLAN ID %d from list\n", + + vlan->vid); + + list_del(&vlan->list); + + kfree(vlan); + + vlan = NULL; + + if (list_empty(&fnic->vlans)) { + + /* we exhausted all vlans, restart vlan disc */ + + spin_unlock_irqrestore(&fnic->vlans_lock, + + flags); + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "fip_timer: vlan list empty, " + + "trigger vlan disc\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + /* check the next vlan */ + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + + list); + + fnic->set_vlan(fnic, vlan->vid); + + vlan->state = FIP_VLAN_SENT; /* sent now */ + + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + + vlan->sol_count++; + + sol_time = jiffies + msecs_to_jiffies + + (FCOE_CTLR_START_DELAY); + + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + + break; + + } + +} +++======= +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,b9374ccb4669..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -39,8 -29,9 +39,7 @@@ + #include "vnic_intr.h" + #include "vnic_stats.h" + #include "fnic_io.h" +- #include "fnic_fip.h" + #include "fnic.h" + -#include "fnic_fdls.h" + -#include "fdls_fc.h" + + #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 + +@@@ -90,8 -86,6 +89,11 @@@ module_param(fnic_max_qdepth, uint, S_I + MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); + + static struct libfc_function_template fnic_transport_template = { +++<<<<<<< HEAD + + .frame_send = fnic_send, + + .lport_set_port_id = fnic_set_port_id, +++======= +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + .fcp_abort_io = fnic_empty_scsi_cleanup, + .fcp_cleanup = fnic_empty_scsi_cleanup, + .exch_mgr_reset = fnic_exch_mgr_reset +@@@ -424,13 -415,6 +428,16 @@@ static void fnic_notify_timer(struct ti + round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); + } + +++<<<<<<< HEAD + +static void fnic_fip_notify_timer(struct timer_list *t) + +{ + + struct fnic *fnic = from_timer(fnic, t, fip_timer); + + + + fnic_handle_fip_timer(fnic); + +} + + +++======= +++>>>>>>> 098585aa8aca (scsi: fnic: Add and integrate support for FIP) + static void fnic_notify_timer_start(struct fnic *fnic) + { + switch (vnic_dev_get_intr_mode(fnic->vdev)) { +@@@ -796,29 -811,24 +792,26 @@@ static int fnic_probe(struct pci_dev *p + fnic->vlan_hw_insert = 1; + fnic->vlan_id = 0; + +- /* Initialize the FIP fcoe_ctrl struct */ +- fnic->ctlr.send = fnic_eth_send; +- fnic->ctlr.update_mac = fnic_update_mac; +- fnic->ctlr.get_src_addr = fnic_get_mac; + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + - dev_info(&fnic->pdev->dev, "firmware supports FIP\n"); + + shost_printk(KERN_INFO, fnic->lport->host, + + "firmware supports FIP\n"); + /* enable directed and multicast */ + vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); + vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); + vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); +- fnic->set_vlan = fnic_set_vlan; + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); +- timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0); + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); +- INIT_WORK(&fnic->event_work, fnic_handle_event); +- skb_queue_head_init(&fnic->fip_frame_queue); +- 
INIT_LIST_HEAD(&fnic->evlist); +- INIT_LIST_HEAD(&fnic->vlans); ++ INIT_LIST_HEAD(&fnic->fip_frame_queue); ++ INIT_LIST_HEAD(&fnic->vlan_list); ++ timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0); ++ timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0); ++ timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0); ++ timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0); ++ fnic->set_vlan = fnic_set_vlan; + } else { + - dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n"); + + shost_printk(KERN_INFO, fnic->lport->host, + + "firmware uses non-FIP mode\n"); + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); + fnic->ctlr.state = FIP_ST_NON_FIP; + } +@@@ -1005,15 -1033,21 +998,18 @@@ static void fnic_remove(struct pci_dev + */ + flush_workqueue(fnic_event_queue); + skb_queue_purge(&fnic->frame_queue); + - fnic_free_txq(&fnic->tx_queue); + + skb_queue_purge(&fnic->tx_queue); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { +- del_timer_sync(&fnic->fip_timer); +- skb_queue_purge(&fnic->fip_frame_queue); ++ del_timer_sync(&fnic->retry_fip_timer); ++ del_timer_sync(&fnic->fcs_ka_timer); ++ del_timer_sync(&fnic->enode_ka_timer); ++ del_timer_sync(&fnic->vn_ka_timer); ++ ++ fnic_free_txq(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); +- fnic_fcoe_evlist_free(fnic); + } + + - if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) + - del_timer_sync(&fnic->iport.fabric.fdmi_timer); + - + /* + * Log off the fabric. This stops all remote ports, dns port, + * logs off the fabric. This flushes all rport, disc, lport work +* Unmerged path drivers/scsi/fnic/fnic_fip.h +diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile +index 6214a6b2e96d..44bf618b0223 100644 +--- a/drivers/scsi/fnic/Makefile ++++ b/drivers/scsi/fnic/Makefile +@@ -2,6 +2,7 @@ + obj-$(CONFIG_FCOE_FNIC) += fnic.o + + fnic-y := \ ++ fip.o\ + fnic_attrs.o \ + fnic_isr.o \ + fnic_main.o \ +diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c +new file mode 100644 +index 000000000000..71b5ceff45db +--- /dev/null ++++ b/drivers/scsi/fnic/fip.c +@@ -0,0 +1,983 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright 2008 Cisco Systems, Inc. All rights reserved. ++ * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
++ */ ++#include "fnic.h" ++#include "fip.h" ++#include ++ ++extern struct workqueue_struct *fnic_fip_queue; ++ ++#define FIP_FNIC_RESET_WAIT_COUNT 15 ++ ++/** ++ * fnic_fcoe_reset_vlans - Free up the list of discovered vlans ++ * @fnic: Handle to fnic driver instance ++ */ ++void fnic_fcoe_reset_vlans(struct fnic *fnic) ++{ ++ unsigned long flags; ++ struct fcoe_vlan *vlan, *next; ++ ++ spin_lock_irqsave(&fnic->vlans_lock, flags); ++ if (!list_empty(&fnic->vlan_list)) { ++ list_for_each_entry_safe(vlan, next, &fnic->vlan_list, list) { ++ list_del(&vlan->list); ++ kfree(vlan); ++ } ++ } ++ ++ spin_unlock_irqrestore(&fnic->vlans_lock, flags); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Reset vlan complete\n"); ++} ++ ++/** ++ * fnic_fcoe_send_vlan_req - Send FIP vlan request to all FCFs MAC ++ * @fnic: Handle to fnic driver instance ++ */ ++void fnic_fcoe_send_vlan_req(struct fnic *fnic) ++{ ++ uint8_t *frame; ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fnic_stats *fnic_stats = &fnic->fnic_stats; ++ u64 vlan_tov; ++ struct fip_vlan_req *pvlan_req; ++ uint16_t frame_size = sizeof(struct fip_vlan_req); ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FIP_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send VLAN req"); ++ return; ++ } ++ ++ fnic_fcoe_reset_vlans(fnic); ++ ++ fnic->set_vlan(fnic, 0); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "set vlan done\n"); ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "got MAC 0x%x:%x:%x:%x:%x:%x\n", iport->hwmac[0], ++ iport->hwmac[1], iport->hwmac[2], iport->hwmac[3], ++ iport->hwmac[4], iport->hwmac[5]); ++ ++ pvlan_req = (struct fip_vlan_req *) frame; ++ *pvlan_req = (struct fip_vlan_req) { ++ .eth = {.h_dest = FCOE_ALL_FCFS_MAC, ++ .h_proto = cpu_to_be16(ETH_P_FIP)}, ++ .fip = {.fip_ver = FIP_VER_ENCAPS(FIP_VER), ++ .fip_op = cpu_to_be16(FIP_OP_VLAN), ++ .fip_subcode = FIP_SC_REQ, ++ .fip_dl_len = cpu_to_be16(FIP_VLAN_REQ_LEN)}, ++ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, ++ .fip_dlen = 2}} ++ }; ++ ++ memcpy(pvlan_req->eth.h_source, iport->hwmac, ETH_ALEN); ++ memcpy(pvlan_req->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); ++ ++ atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); ++ ++ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Send VLAN req\n"); ++ fnic_send_fip_frame(iport, frame, frame_size); ++ ++ vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); ++ mod_timer(&fnic->retry_fip_timer, round_jiffies(vlan_tov)); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fip timer set\n"); ++} ++ ++/** ++ * fnic_fcoe_process_vlan_resp - Processes the vlan response from one FCF and ++ * populates VLAN list. ++ * @fnic: Handle to fnic driver instance ++ * @fiph: Received FIP frame ++ * ++ * Will wait for responses from multiple FCFs until timeout. 
++ */ ++void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph) ++{ ++ struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *)fiph; ++ ++ struct fnic_stats *fnic_stats = &fnic->fnic_stats; ++ u16 vid; ++ int num_vlan = 0; ++ int cur_desc, desc_len; ++ struct fcoe_vlan *vlan; ++ struct fip_vlan_desc *vlan_desc; ++ unsigned long flags; ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p got vlan resp\n", fnic); ++ ++ desc_len = be16_to_cpu(vlan_notif->fip.fip_dl_len); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "desc_len %d\n", desc_len); ++ ++ spin_lock_irqsave(&fnic->vlans_lock, flags); ++ ++ cur_desc = 0; ++ while (desc_len > 0) { ++ vlan_desc = ++ (struct fip_vlan_desc *)(((char *)vlan_notif->vlans_desc) ++ + cur_desc * 4); ++ ++ if (vlan_desc->fd_desc.fip_dtype == FIP_DT_VLAN) { ++ if (vlan_desc->fd_desc.fip_dlen != 1) { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "Invalid descriptor length(%x) in VLan response\n", ++ vlan_desc->fd_desc.fip_dlen); ++ ++ } ++ num_vlan++; ++ vid = be16_to_cpu(vlan_desc->fd_vlan); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "process_vlan_resp: FIP VLAN %d\n", vid); ++ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); ++ ++ if (!vlan) { ++ /* retry from timer */ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "Mem Alloc failure\n"); ++ spin_unlock_irqrestore(&fnic->vlans_lock, ++ flags); ++ goto out; ++ } ++ vlan->vid = vid & 0x0fff; ++ vlan->state = FIP_VLAN_AVAIL; ++ list_add_tail(&vlan->list, &fnic->vlan_list); ++ break; ++ } else { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "Invalid descriptor type(%x) in VLan response\n", ++ vlan_desc->fd_desc.fip_dtype); ++ /* ++ * Note : received a type=2 descriptor here i.e. FIP ++ * MAC Address Descriptor ++ */ ++ } ++ cur_desc += vlan_desc->fd_desc.fip_dlen; ++ desc_len -= vlan_desc->fd_desc.fip_dlen; ++ } ++ ++ /* any VLAN descriptors present ? 
*/ ++ if (num_vlan == 0) { ++ atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p No VLAN descriptors in FIP VLAN response\n", ++ fnic); ++ } ++ ++ spin_unlock_irqrestore(&fnic->vlans_lock, flags); ++ ++ out: ++ return; ++} ++ ++/** ++ * fnic_fcoe_start_fcf_discovery - Start FIP FCF discovery in a selected vlan ++ * @fnic: Handle to fnic driver instance ++ */ ++void fnic_fcoe_start_fcf_discovery(struct fnic *fnic) ++{ ++ uint8_t *frame; ++ struct fnic_iport_s *iport = &fnic->iport; ++ u64 fcs_tov; ++ struct fip_discovery *pdisc_sol; ++ uint16_t frame_size = sizeof(struct fip_discovery); ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FIP_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to start FCF discovery"); ++ return; ++ } ++ ++ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN); ++ ++ pdisc_sol = (struct fip_discovery *) frame; ++ *pdisc_sol = (struct fip_discovery) { ++ .eth = {.h_dest = FCOE_ALL_FCFS_MAC, ++ .h_proto = cpu_to_be16(ETH_P_FIP)}, ++ .fip = { ++ .fip_ver = FIP_VER_ENCAPS(FIP_VER), .fip_op = cpu_to_be16(FIP_OP_DISC), ++ .fip_subcode = FIP_SC_REQ, .fip_dl_len = cpu_to_be16(FIP_DISC_SOL_LEN), ++ .fip_flags = cpu_to_be16(FIP_FL_FPMA)}, ++ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}, ++ .name_desc = {.fd_desc = {.fip_dtype = FIP_DT_NAME, .fip_dlen = 3}}, ++ .fcoe_desc = {.fd_desc = {.fip_dtype = FIP_DT_FCOE_SIZE, .fip_dlen = 1}, ++ .fd_size = cpu_to_be16(FCOE_MAX_SIZE)} ++ }; ++ ++ memcpy(pdisc_sol->eth.h_source, iport->hwmac, ETH_ALEN); ++ memcpy(pdisc_sol->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); ++ iport->selected_fcf.fcf_priority = 0xFF; ++ ++ FNIC_STD_SET_NODE_NAME(&pdisc_sol->name_desc.fd_wwn, iport->wwnn); ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Start FCF discovery\n"); ++ fnic_send_fip_frame(iport, frame, frame_size); ++ ++ iport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED; ++ ++ fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV); ++ mod_timer(&fnic->retry_fip_timer, round_jiffies(fcs_tov)); ++} ++ ++/** ++ * fnic_fcoe_fip_discovery_resp - Processes FCF advertisements. ++ * @fnic: Handle to fnic driver instance ++ * @fiph: Received frame ++ * ++ * FCF advertisements can be: ++ * solicited - Sent in response of a discover FCF FIP request ++ * Store the information of the FCF with highest priority. ++ * Wait until timeout in case of multiple FCFs. ++ * ++ * unsolicited - Sent periodically by the FCF for keep alive. ++ * If FLOGI is in progress or completed and the advertisement is ++ * received by our selected FCF, refresh the keep alive timer. 
++ */ ++void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph) ++{ ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fip_disc_adv *disc_adv = (struct fip_disc_adv *)fiph; ++ u64 fcs_ka_tov; ++ u64 tov; ++ int fka_has_changed; ++ ++ switch (iport->fip.state) { ++ case FDLS_FIP_FCF_DISCOVERY_STARTED: ++ if (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL) { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "fnic 0x%p Solicited adv\n", fnic); ++ ++ if ((disc_adv->prio_desc.fd_pri < ++ iport->selected_fcf.fcf_priority) ++ && (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL)) { ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "fnic 0x%p FCF Available\n", fnic); ++ memcpy(iport->selected_fcf.fcf_mac, ++ disc_adv->mac_desc.fd_mac, ETH_ALEN); ++ iport->selected_fcf.fcf_priority = ++ disc_adv->prio_desc.fd_pri; ++ iport->selected_fcf.fka_adv_period = ++ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, "adv time %d", ++ iport->selected_fcf.fka_adv_period); ++ iport->selected_fcf.ka_disabled = ++ (disc_adv->fka_adv_desc.fd_flags & 1); ++ } ++ } ++ break; ++ case FDLS_FIP_FLOGI_STARTED: ++ case FDLS_FIP_FLOGI_COMPLETE: ++ if (!(be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL)) { ++ /* same fcf */ ++ if (memcmp ++ (iport->selected_fcf.fcf_mac, ++ disc_adv->mac_desc.fd_mac, ETH_ALEN) == 0) { ++ if (iport->selected_fcf.fka_adv_period != ++ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period)) { ++ iport->selected_fcf.fka_adv_period = ++ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); ++ FNIC_FIP_DBG(KERN_INFO, ++ fnic->lport->host, ++ fnic->fnic_num, ++ "change fka to %d", ++ iport->selected_fcf.fka_adv_period); ++ } ++ ++ fka_has_changed = ++ (iport->selected_fcf.ka_disabled == 1) ++ && ((disc_adv->fka_adv_desc.fd_flags & 1) == ++ 0); ++ ++ iport->selected_fcf.ka_disabled = ++ (disc_adv->fka_adv_desc.fd_flags & 1); ++ if (!((iport->selected_fcf.ka_disabled) ++ || (iport->selected_fcf.fka_adv_period == ++ 0))) { ++ ++ fcs_ka_tov = jiffies ++ + 3 ++ * ++ msecs_to_jiffies(iport->selected_fcf.fka_adv_period); ++ mod_timer(&fnic->fcs_ka_timer, ++ round_jiffies(fcs_ka_tov)); ++ } else { ++ if (timer_pending(&fnic->fcs_ka_timer)) ++ del_timer_sync(&fnic->fcs_ka_timer); ++ } ++ ++ if (fka_has_changed) { ++ if (iport->selected_fcf.fka_adv_period != 0) { ++ tov = ++ jiffies + ++ msecs_to_jiffies( ++ iport->selected_fcf.fka_adv_period); ++ mod_timer(&fnic->enode_ka_timer, ++ round_jiffies(tov)); ++ ++ tov = ++ jiffies + ++ msecs_to_jiffies ++ (FIP_VN_KA_PERIOD); ++ mod_timer(&fnic->vn_ka_timer, ++ round_jiffies(tov)); ++ } ++ } ++ } ++ } ++ break; ++ default: ++ break; ++ } /* end switch */ ++} ++ ++/** ++ * fnic_fcoe_start_flogi - Send FIP FLOGI to the selected FCF ++ * @fnic: Handle to fnic driver instance ++ */ ++void fnic_fcoe_start_flogi(struct fnic *fnic) ++{ ++ uint8_t *frame; ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fip_flogi *pflogi_req; ++ u64 flogi_tov; ++ uint16_t oxid; ++ uint16_t frame_size = sizeof(struct fip_flogi); ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FIP_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to start FIP FLOGI"); ++ return; ++ } ++ ++ pflogi_req = (struct fip_flogi *) frame; ++ *pflogi_req = (struct fip_flogi) { ++ .eth = { ++ .h_proto = cpu_to_be16(ETH_P_FIP)}, ++ .fip = { ++ .fip_ver = FIP_VER_ENCAPS(FIP_VER), ++ .fip_op = cpu_to_be16(FIP_OP_LS), ++ .fip_subcode = 
FIP_SC_REQ, ++ .fip_dl_len = cpu_to_be16(FIP_FLOGI_LEN), ++ .fip_flags = cpu_to_be16(FIP_FL_FPMA)}, ++ .flogi_desc = { ++ .fd_desc = {.fip_dtype = FIP_DT_FLOGI, .fip_dlen = 36}, ++ .flogi = { ++ .fchdr = { ++ .fh_r_ctl = FC_RCTL_ELS_REQ, ++ .fh_d_id = {0xFF, 0xFF, 0xFE}, ++ .fh_type = FC_TYPE_ELS, ++ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .els = { ++ .fl_cmd = ELS_FLOGI, ++ .fl_csp = { ++ .sp_hi_ver = ++ FNIC_FC_PH_VER_HI, ++ .sp_lo_ver = ++ FNIC_FC_PH_VER_LO, ++ .sp_bb_cred = ++ cpu_to_be16 ++ (FNIC_FC_B2B_CREDIT), ++ .sp_bb_data = ++ cpu_to_be16 ++ (FNIC_FC_B2B_RDF_SZ)}, ++ .fl_cssp[2].cp_class = ++ cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) ++ }, ++ } ++ }, ++ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} ++ }; ++ ++ memcpy(pflogi_req->eth.h_source, iport->hwmac, ETH_ALEN); ++ if (iport->usefip) ++ memcpy(pflogi_req->eth.h_dest, iport->selected_fcf.fcf_mac, ++ ETH_ALEN); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, ++ &iport->active_oxid_fabric_req); ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate OXID to send FIP FLOGI"); ++ mempool_free(frame, fnic->frame_pool); ++ return; ++ } ++ FNIC_STD_SET_OX_ID(pflogi_req->flogi_desc.flogi.fchdr, oxid); ++ ++ FNIC_STD_SET_NPORT_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwpn, ++ iport->wwpn); ++ FNIC_STD_SET_NODE_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwnn, ++ iport->wwnn); ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FIP start FLOGI\n"); ++ fnic_send_fip_frame(iport, frame, frame_size); ++ iport->fip.flogi_retry++; ++ ++ iport->fip.state = FDLS_FIP_FLOGI_STARTED; ++ flogi_tov = jiffies + msecs_to_jiffies(fnic->config.flogi_timeout); ++ mod_timer(&fnic->retry_fip_timer, round_jiffies(flogi_tov)); ++} ++ ++/** ++ * fnic_fcoe_process_flogi_resp - Processes FLOGI response from FCF. ++ * @fnic: Handle to fnic driver instance ++ * @fiph: Received frame ++ * ++ * If successful save assigned fc_id and MAC, program firmware ++ * and start fdls discovery, else restart vlan discovery. ++ */ ++void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph) ++{ ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *)fiph; ++ int desc_len; ++ uint32_t s_id; ++ int frame_type; ++ uint16_t oxid; ++ ++ struct fnic_stats *fnic_stats = &fnic->fnic_stats; ++ struct fc_frame_header *fchdr = &flogi_rsp->rsp_desc.flogi.fchdr; ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p FIP FLOGI rsp\n", fnic); ++ desc_len = be16_to_cpu(flogi_rsp->fip.fip_dl_len); ++ if (desc_len != 38) { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Invalid Descriptor List len (%x). 
Dropping frame\n", ++ desc_len); ++ return; ++ } ++ ++ if (!((flogi_rsp->rsp_desc.fd_desc.fip_dtype == 7) ++ && (flogi_rsp->rsp_desc.fd_desc.fip_dlen == 36)) ++ || !((flogi_rsp->mac_desc.fd_desc.fip_dtype == 2) ++ && (flogi_rsp->mac_desc.fd_desc.fip_dlen == 2))) { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Dropping frame invalid type and len mix\n"); ++ return; ++ } ++ ++ frame_type = fnic_fdls_validate_and_get_frame_type(iport, fchdr); ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ if ((fchdr->fh_f_ctl[0] != 0x98) ++ || (fchdr->fh_r_ctl != 0x23) ++ || (s_id != FC_FID_FLOGI) ++ || (frame_type != FNIC_FABRIC_FLOGI_RSP) ++ || (fchdr->fh_type != 0x01)) { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Dropping invalid frame: s_id %x F %x R %x t %x OX_ID %x\n", ++ s_id, fchdr->fh_f_ctl[0], fchdr->fh_r_ctl, ++ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); ++ return; ++ } ++ ++ if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p rsp for pending FLOGI\n", fnic); ++ ++ oxid = FNIC_STD_GET_OX_ID(fchdr); ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ del_timer_sync(&fnic->retry_fip_timer); ++ ++ if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN) ++ && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) { ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "fnic 0x%p FLOGI success\n", fnic); ++ memcpy(iport->fpma, flogi_rsp->mac_desc.fd_mac, ETH_ALEN); ++ iport->fcid = ++ ntoh24(flogi_rsp->rsp_desc.flogi.fchdr.fh_d_id); ++ ++ iport->r_a_tov = ++ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_r_a_tov); ++ iport->e_d_tov = ++ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_e_d_tov); ++ memcpy(fnic->iport.fcfmac, iport->selected_fcf.fcf_mac, ++ ETH_ALEN); ++ vnic_dev_add_addr(fnic->vdev, flogi_rsp->mac_desc.fd_mac); ++ ++ if (fnic_fdls_register_portid(iport, iport->fcid, NULL) ++ != 0) { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "fnic 0x%p flogi registration failed\n", ++ fnic); ++ return; ++ } ++ ++ iport->fip.state = FDLS_FIP_FLOGI_COMPLETE; ++ iport->state = FNIC_IPORT_STATE_FABRIC_DISC; ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, "iport->state:%d\n", ++ iport->state); ++ if (!((iport->selected_fcf.ka_disabled) ++ || (iport->selected_fcf.fka_adv_period == 0))) { ++ u64 tov; ++ ++ tov = jiffies ++ + ++ msecs_to_jiffies(iport->selected_fcf.fka_adv_period); ++ mod_timer(&fnic->enode_ka_timer, ++ round_jiffies(tov)); ++ ++ tov = ++ jiffies + ++ msecs_to_jiffies(FIP_VN_KA_PERIOD); ++ mod_timer(&fnic->vn_ka_timer, ++ round_jiffies(tov)); ++ ++ } ++ } else { ++ /* ++ * If there's FLOGI rejects - clear all ++ * fcf's & restart from scratch ++ */ ++ atomic64_inc(&fnic_stats->vlan_stats.flogi_rejects); ++ /* start FCoE VLAN discovery */ ++ fnic_fcoe_send_vlan_req(fnic); ++ ++ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; ++ } ++ } ++} ++ ++/** ++ * fnic_common_fip_cleanup - Clean up FCF info and timers in case of ++ * link down/CVL ++ * @fnic: Handle to fnic driver instance ++ */ ++void fnic_common_fip_cleanup(struct fnic *fnic) ++{ ++ ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ if (!iport->usefip) ++ return; ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p fip cleanup\n", fnic); ++ ++ iport->fip.state = FDLS_FIP_INIT; ++ ++ del_timer_sync(&fnic->retry_fip_timer); ++ del_timer_sync(&fnic->fcs_ka_timer); ++ del_timer_sync(&fnic->enode_ka_timer); ++ 
del_timer_sync(&fnic->vn_ka_timer); ++ ++ if (!is_zero_ether_addr(iport->fpma)) ++ vnic_dev_del_addr(fnic->vdev, iport->fpma); ++ ++ memset(iport->fpma, 0, ETH_ALEN); ++ iport->fcid = 0; ++ iport->r_a_tov = 0; ++ iport->e_d_tov = 0; ++ memset(fnic->iport.fcfmac, 0, ETH_ALEN); ++ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN); ++ iport->selected_fcf.fcf_priority = 0; ++ iport->selected_fcf.fka_adv_period = 0; ++ iport->selected_fcf.ka_disabled = 0; ++ ++ fnic_fcoe_reset_vlans(fnic); ++} ++ ++/** ++ * fnic_fcoe_process_cvl - Processes Clear Virtual Link from FCF. ++ * @fnic: Handle to fnic driver instance ++ * @fiph: Received frame ++ * ++ * Verify that cvl is received from our current FCF for our assigned MAC ++ * and clean up and restart the vlan discovery. ++ */ ++void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph) ++{ ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fip_cvl *cvl_msg = (struct fip_cvl *)fiph; ++ int i; ++ int found = false; ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p clear virtual link handler\n", fnic); ++ ++ if (!((cvl_msg->fcf_mac_desc.fd_desc.fip_dtype == 2) ++ && (cvl_msg->fcf_mac_desc.fd_desc.fip_dlen == 2)) ++ || !((cvl_msg->name_desc.fd_desc.fip_dtype == 4) ++ && (cvl_msg->name_desc.fd_desc.fip_dlen == 3))) { ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "invalid mix: ft %x fl %x ndt %x ndl %x", ++ cvl_msg->fcf_mac_desc.fd_desc.fip_dtype, ++ cvl_msg->fcf_mac_desc.fd_desc.fip_dlen, ++ cvl_msg->name_desc.fd_desc.fip_dtype, ++ cvl_msg->name_desc.fd_desc.fip_dlen); ++ } ++ ++ if (memcmp ++ (iport->selected_fcf.fcf_mac, cvl_msg->fcf_mac_desc.fd_mac, ETH_ALEN) ++ == 0) { ++ for (i = 0; i < ((be16_to_cpu(fiph->fip_dl_len) / 5) - 1); i++) { ++ if (!((cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype == 11) ++ && (cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen == 5))) { ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, ++ "Invalid type and len mix type: %d len: %d\n", ++ cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype, ++ cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen); ++ } ++ if (memcmp ++ (iport->fpma, cvl_msg->vn_ports_desc[i].fd_mac, ++ ETH_ALEN) == 0) { ++ found = true; ++ break; ++ } ++ } ++ if (!found) ++ return; ++ fnic_common_fip_cleanup(fnic); ++ ++ fnic_fcoe_send_vlan_req(fnic); ++ } ++} ++ ++/** ++ * fdls_fip_recv_frame - Demultiplexer for FIP frames ++ * @fnic: Handle to fnic driver instance ++ * @frame: Received ethernet frame ++ */ ++int fdls_fip_recv_frame(struct fnic *fnic, void *frame) ++{ ++ struct ethhdr *eth = (struct ethhdr *)frame; ++ struct fip_header *fiph; ++ u16 op; ++ u8 sub; ++ int len = 2048; ++ ++ if (be16_to_cpu(eth->h_proto) == ETH_P_FIP) { ++ fiph = (struct fip_header *)(eth + 1); ++ op = be16_to_cpu(fiph->fip_op); ++ sub = fiph->fip_subcode; ++ ++ fnic_debug_dump_fip_frame(fnic, eth, len, "Incoming"); ++ ++ if (op == FIP_OP_DISC && sub == FIP_SC_REP) ++ fnic_fcoe_fip_discovery_resp(fnic, fiph); ++ else if (op == FIP_OP_VLAN && sub == FIP_SC_REP) ++ fnic_fcoe_process_vlan_resp(fnic, fiph); ++ else if (op == FIP_OP_CTRL && sub == FIP_SC_REP) ++ fnic_fcoe_process_cvl(fnic, fiph); ++ else if (op == FIP_OP_LS && sub == FIP_SC_REP) ++ fnic_fcoe_process_flogi_resp(fnic, fiph); ++ ++ /* Return true if the frame was a FIP frame */ ++ return true; ++ } ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Not a FIP Frame"); ++ return false; ++} ++ ++void fnic_work_on_fip_timer(struct work_struct *work) ++{ ++ struct fnic *fnic = container_of(work, 
struct fnic, fip_timer_work); ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FIP timeout\n"); ++ ++ if (iport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) { ++ fnic_vlan_discovery_timeout(fnic); ++ } else if (iport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) { ++ u8 zmac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 }; ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FCF Discovery timeout\n"); ++ if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) { ++ ++ if (iport->flags & FNIC_FIRST_LINK_UP) ++ iport->flags &= ~FNIC_FIRST_LINK_UP; ++ ++ fnic_fcoe_start_flogi(fnic); ++ if (!((iport->selected_fcf.ka_disabled) ++ || (iport->selected_fcf.fka_adv_period == 0))) { ++ u64 fcf_tov; ++ ++ fcf_tov = jiffies ++ + 3 ++ * ++ msecs_to_jiffies(iport->selected_fcf.fka_adv_period); ++ mod_timer(&fnic->fcs_ka_timer, ++ round_jiffies(fcf_tov)); ++ } ++ } else { ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, ++ fnic->fnic_num, "FCF Discovery timeout\n"); ++ fnic_vlan_discovery_timeout(fnic); ++ } ++ } else if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI timeout\n"); ++ if (iport->fip.flogi_retry < fnic->config.flogi_retries) ++ fnic_fcoe_start_flogi(fnic); ++ else ++ fnic_vlan_discovery_timeout(fnic); ++ } ++} ++ ++/** ++ * fnic_handle_fip_timer - Timeout handler for FIP discover phase. ++ * @t: Handle to the timer list ++ * ++ * Based on the current state, start next phase or restart discovery. ++ */ ++void fnic_handle_fip_timer(struct timer_list *t) ++{ ++ struct fnic *fnic = from_timer(fnic, t, retry_fip_timer); ++ ++ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer); ++ queue_work(fnic_fip_queue, &fnic->fip_timer_work); ++} ++ ++/** ++ * fnic_handle_enode_ka_timer - FIP node keep alive. 
++ * @t: Handle to the timer list ++ */ ++void fnic_handle_enode_ka_timer(struct timer_list *t) ++{ ++ uint8_t *frame; ++ struct fnic *fnic = from_timer(fnic, t, enode_ka_timer); ++ ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fip_enode_ka *penode_ka; ++ u64 enode_ka_tov; ++ uint16_t frame_size = sizeof(struct fip_enode_ka); ++ ++ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) ++ return; ++ ++ if ((iport->selected_fcf.ka_disabled) ++ || (iport->selected_fcf.fka_adv_period == 0)) { ++ return; ++ } ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FIP_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send enode ka"); ++ return; ++ } ++ ++ penode_ka = (struct fip_enode_ka *) frame; ++ *penode_ka = (struct fip_enode_ka) { ++ .eth = { ++ .h_proto = cpu_to_be16(ETH_P_FIP)}, ++ .fip = { ++ .fip_ver = FIP_VER_ENCAPS(FIP_VER), ++ .fip_op = cpu_to_be16(FIP_OP_CTRL), ++ .fip_subcode = FIP_SC_REQ, ++ .fip_dl_len = cpu_to_be16(FIP_ENODE_KA_LEN)}, ++ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} ++ }; ++ ++ memcpy(penode_ka->eth.h_source, iport->hwmac, ETH_ALEN); ++ memcpy(penode_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); ++ memcpy(penode_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); ++ ++ FNIC_FIP_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "Handle enode KA timer\n"); ++ fnic_send_fip_frame(iport, frame, frame_size); ++ enode_ka_tov = jiffies ++ + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); ++ mod_timer(&fnic->enode_ka_timer, round_jiffies(enode_ka_tov)); ++} ++ ++/** ++ * fnic_handle_vn_ka_timer - FIP virtual port keep alive. ++ * @t: Handle to the timer list ++ */ ++void fnic_handle_vn_ka_timer(struct timer_list *t) ++{ ++ uint8_t *frame; ++ struct fnic *fnic = from_timer(fnic, t, vn_ka_timer); ++ ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fip_vn_port_ka *pvn_port_ka; ++ u64 vn_ka_tov; ++ uint8_t fcid[3]; ++ uint16_t frame_size = sizeof(struct fip_vn_port_ka); ++ ++ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) ++ return; ++ ++ if ((iport->selected_fcf.ka_disabled) ++ || (iport->selected_fcf.fka_adv_period == 0)) { ++ return; ++ } ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FIP_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send vn ka"); ++ return; ++ } ++ ++ pvn_port_ka = (struct fip_vn_port_ka *) frame; ++ *pvn_port_ka = (struct fip_vn_port_ka) { ++ .eth = { ++ .h_proto = cpu_to_be16(ETH_P_FIP)}, ++ .fip = { ++ .fip_ver = FIP_VER_ENCAPS(FIP_VER), ++ .fip_op = cpu_to_be16(FIP_OP_CTRL), ++ .fip_subcode = FIP_SC_REQ, ++ .fip_dl_len = cpu_to_be16(FIP_VN_KA_LEN)}, ++ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}, ++ .vn_port_desc = {.fd_desc = {.fip_dtype = FIP_DT_VN_ID, .fip_dlen = 5}} ++ }; ++ ++ memcpy(pvn_port_ka->eth.h_source, iport->fpma, ETH_ALEN); ++ memcpy(pvn_port_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); ++ memcpy(pvn_port_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); ++ memcpy(pvn_port_ka->vn_port_desc.fd_mac, iport->fpma, ETH_ALEN); ++ hton24(fcid, iport->fcid); ++ memcpy(pvn_port_ka->vn_port_desc.fd_fc_id, fcid, 3); ++ FNIC_STD_SET_NPORT_NAME(&pvn_port_ka->vn_port_desc.fd_wwpn, iport->wwpn); ++ ++ FNIC_FIP_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "Handle vnport KA timer\n"); ++ fnic_send_fip_frame(iport, frame, frame_size); ++ vn_ka_tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); ++ mod_timer(&fnic->vn_ka_timer, round_jiffies(vn_ka_tov)); ++} ++ 
++/** ++ * fnic_vlan_discovery_timeout - Handle vlan discovery timeout ++ * @fnic: Handle to fnic driver instance ++ * ++ * End of VLAN discovery or FCF discovery time window. ++ * Start the FCF discovery if VLAN was never used. ++ */ ++void fnic_vlan_discovery_timeout(struct fnic *fnic) ++{ ++ struct fcoe_vlan *vlan; ++ struct fnic_iport_s *iport = &fnic->iport; ++ struct fnic_stats *fnic_stats = &fnic->fnic_stats; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (fnic->stop_rx_link_events) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ return; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ if (!iport->usefip) ++ return; ++ ++ spin_lock_irqsave(&fnic->vlans_lock, flags); ++ if (list_empty(&fnic->vlan_list)) { ++ /* no vlans available, try again */ ++ spin_unlock_irqrestore(&fnic->vlans_lock, flags); ++ fnic_fcoe_send_vlan_req(fnic); ++ return; ++ } ++ ++ vlan = list_first_entry(&fnic->vlan_list, struct fcoe_vlan, list); ++ ++ if (vlan->state == FIP_VLAN_SENT) { ++ if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { ++ /* ++ * no response on this vlan, remove from the list. ++ * Try the next vlan ++ */ ++ list_del(&vlan->list); ++ kfree(vlan); ++ vlan = NULL; ++ if (list_empty(&fnic->vlan_list)) { ++ /* we exhausted all vlans, restart vlan disc */ ++ spin_unlock_irqrestore(&fnic->vlans_lock, ++ flags); ++ fnic_fcoe_send_vlan_req(fnic); ++ return; ++ } ++ /* check the next vlan */ ++ vlan = ++ list_first_entry(&fnic->vlan_list, struct fcoe_vlan, ++ list); ++ ++ fnic->set_vlan(fnic, vlan->vid); ++ vlan->state = FIP_VLAN_SENT; /* sent now */ ++ ++ } ++ atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); ++ ++ } else { ++ fnic->set_vlan(fnic, vlan->vid); ++ vlan->state = FIP_VLAN_SENT; /* sent now */ ++ } ++ vlan->sol_count++; ++ spin_unlock_irqrestore(&fnic->vlans_lock, flags); ++ fnic_fcoe_start_fcf_discovery(fnic); ++} ++ ++/** ++ * fnic_work_on_fcs_ka_timer - Handle work on FCS keep alive timer. ++ * @work: the work queue to be serviced ++ * ++ * Finish handling fcs_ka_timer in process context. ++ * Clean up, bring the link down, and restart all FIP discovery. ++ */ ++void fnic_work_on_fcs_ka_timer(struct work_struct *work) ++{ ++ struct fnic ++ *fnic = container_of(work, struct fnic, fip_timer_work); ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ FNIC_FIP_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic 0x%p fcs ka timeout\n", fnic); ++ ++ fnic_common_fip_cleanup(fnic); ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ iport->state = FNIC_IPORT_STATE_FIP; ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ ++ fnic_fcoe_send_vlan_req(fnic); ++} ++ ++/** ++ * fnic_handle_fcs_ka_timer - Handle FCS keep alive timer. ++ * @t: Handle to the timer list ++ * ++ * No keep alives received from FCF. Clean up, bring the link down ++ * and restart all the FIP discovery. ++ */ ++void fnic_handle_fcs_ka_timer(struct timer_list *t) ++{ ++ struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer); ++ ++ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer); ++ queue_work(fnic_fip_queue, &fnic->fip_timer_work); ++} +diff --git a/drivers/scsi/fnic/fip.h b/drivers/scsi/fnic/fip.h +new file mode 100644 +index 000000000000..be727ac19af6 +--- /dev/null ++++ b/drivers/scsi/fnic/fip.h +@@ -0,0 +1,157 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2008 Cisco Systems, Inc. All rights reserved. ++ * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
++ */ ++#ifndef _FIP_H_ ++#define _FIP_H_ ++ ++#include "fdls_fc.h" ++#include "fnic_fdls.h" ++#include ++ ++/* Drop the cast from the standard definition */ ++#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02} ++#define FCOE_MAX_SIZE 0x082E ++ ++#define FCOE_CTLR_FIPVLAN_TOV (3*1000) ++#define FCOE_CTLR_FCS_TOV (3*1000) ++#define FCOE_CTLR_MAX_SOL (5*1000) ++ ++#define FIP_DISC_SOL_LEN (6) ++#define FIP_VLAN_REQ_LEN (2) ++#define FIP_ENODE_KA_LEN (2) ++#define FIP_VN_KA_LEN (7) ++#define FIP_FLOGI_LEN (38) ++ ++enum fdls_vlan_state { ++ FIP_VLAN_AVAIL, ++ FIP_VLAN_SENT ++}; ++ ++enum fdls_fip_state { ++ FDLS_FIP_INIT, ++ FDLS_FIP_VLAN_DISCOVERY_STARTED, ++ FDLS_FIP_FCF_DISCOVERY_STARTED, ++ FDLS_FIP_FLOGI_STARTED, ++ FDLS_FIP_FLOGI_COMPLETE, ++}; ++ ++/* ++ * VLAN entry. ++ */ ++struct fcoe_vlan { ++ struct list_head list; ++ uint16_t vid; /* vlan ID */ ++ uint16_t sol_count; /* no. of sols sent */ ++ uint16_t state; /* state */ ++}; ++ ++struct fip_vlan_req { ++ struct ethhdr eth; ++ struct fip_header fip; ++ struct fip_mac_desc mac_desc; ++} __packed; ++ ++struct fip_vlan_notif { ++ struct fip_header fip; ++ struct fip_vlan_desc vlans_desc[]; ++} __packed; ++ ++struct fip_vn_port_ka { ++ struct ethhdr eth; ++ struct fip_header fip; ++ struct fip_mac_desc mac_desc; ++ struct fip_vn_desc vn_port_desc; ++} __packed; ++ ++struct fip_enode_ka { ++ struct ethhdr eth; ++ struct fip_header fip; ++ struct fip_mac_desc mac_desc; ++} __packed; ++ ++struct fip_cvl { ++ struct fip_header fip; ++ struct fip_mac_desc fcf_mac_desc; ++ struct fip_wwn_desc name_desc; ++ struct fip_vn_desc vn_ports_desc[]; ++} __packed; ++ ++struct fip_flogi_desc { ++ struct fip_desc fd_desc; ++ uint16_t rsvd; ++ struct fc_std_flogi flogi; ++} __packed; ++ ++struct fip_flogi_rsp_desc { ++ struct fip_desc fd_desc; ++ uint16_t rsvd; ++ struct fc_std_flogi flogi; ++} __packed; ++ ++struct fip_flogi { ++ struct ethhdr eth; ++ struct fip_header fip; ++ struct fip_flogi_desc flogi_desc; ++ struct fip_mac_desc mac_desc; ++} __packed; ++ ++struct fip_flogi_rsp { ++ struct fip_header fip; ++ struct fip_flogi_rsp_desc rsp_desc; ++ struct fip_mac_desc mac_desc; ++} __packed; ++ ++struct fip_discovery { ++ struct ethhdr eth; ++ struct fip_header fip; ++ struct fip_mac_desc mac_desc; ++ struct fip_wwn_desc name_desc; ++ struct fip_size_desc fcoe_desc; ++} __packed; ++ ++struct fip_disc_adv { ++ struct fip_header fip; ++ struct fip_pri_desc prio_desc; ++ struct fip_mac_desc mac_desc; ++ struct fip_wwn_desc name_desc; ++ struct fip_fab_desc fabric_desc; ++ struct fip_fka_desc fka_adv_desc; ++} __packed; ++ ++void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph); ++void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph); ++void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph); ++void fnic_work_on_fip_timer(struct work_struct *work); ++void fnic_work_on_fcs_ka_timer(struct work_struct *work); ++void fnic_fcoe_send_vlan_req(struct fnic *fnic); ++void fnic_fcoe_start_fcf_discovery(struct fnic *fnic); ++void fnic_fcoe_start_flogi(struct fnic *fnic); ++void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph); ++void fnic_vlan_discovery_timeout(struct fnic *fnic); ++ ++#ifdef FNIC_DEBUG ++static inline void ++fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, ++ int len, char *pfx) ++{ ++ struct fip_header *fiph = (struct fip_header *)(eth + 1); ++ u16 op = be16_to_cpu(fiph->fip_op); ++ u8 sub = fiph->fip_subcode; ++ ++ 
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "FIP %s packet contents: op: 0x%x sub: 0x%x (len = %d)", ++ pfx, op, sub, len); ++ ++ fnic_debug_dump(fnic, (uint8_t *)eth, len); ++} ++ ++#else /* FNIC_DEBUG */ ++ ++static inline void ++fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, ++ int len, char *pfx) {} ++#endif /* FNIC_DEBUG */ ++ ++#endif /* _FIP_H_ */ +* Unmerged path drivers/scsi/fnic/fnic.h +* Unmerged path drivers/scsi/fnic/fnic_fcs.c +* Unmerged path drivers/scsi/fnic/fnic_fip.h +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/09c1e6ab.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/09c1e6ab.failed new file mode 100644 index 0000000000000..bdb2710398605 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/09c1e6ab.failed @@ -0,0 +1,98 @@ +scsi: fnic: Add and integrate support for FDMI + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 09c1e6ab4ab2a107d96f119950dc330e446dc2b0 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/09c1e6ab.failed + +Add support for Fabric-Device Management Interface (FDMI) by introducing +PCI device IDs for Cisco Hardware. + +Introduce a module parameter to enable/disable FDMI support. + +Integrate support for FDMI. + + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202406110734.p2v8dq9v-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela +Co-developed-by: Gian Carlo Boffa + Signed-off-by: Gian Carlo Boffa +Co-developed-by: Arulprabhu Ponnusamy + Signed-off-by: Arulprabhu Ponnusamy +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-8-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 09c1e6ab4ab2a107d96f119950dc330e446dc2b0) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,c75716856417..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -601,6 -613,9 +604,12 @@@ static int fnic_probe(struct pci_dev *p + int fnic_id = 0; + int i; + unsigned long flags; +++<<<<<<< HEAD +++======= ++ int hwq; ++ char *desc, *subsys_desc; ++ int len; +++>>>>>>> 09c1e6ab4ab2 (scsi: fnic: Add and integrate support for FDMI) + + /* + * Allocate SCSI Host and set up association between host, +@@@ -634,10 -649,22 +643,29 @@@ + fnic->fnic_num = fnic_id; + fnic_stats_debugfs_init(fnic); + +++<<<<<<< HEAD + + /* Setup PCI resources */ + + pci_set_drvdata(pdev, fnic); + + + + fnic->pdev = pdev; +++======= ++ /* Find model name from PCIe subsys ID */ ++ if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) { ++ dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc); ++ ++ /* Update FDMI model */ ++ fnic->subsys_desc_len = strlen(subsys_desc); ++ len = ARRAY_SIZE(fnic->subsys_desc); ++ if (fnic->subsys_desc_len > len) ++ fnic->subsys_desc_len = len; ++ memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len); ++ dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc); ++ } else { ++ fnic->subsys_desc_len = 0; ++ dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown", ++ pdev->subsystem_device); ++ } +++>>>>>>> 09c1e6ab4ab2 (scsi: fnic: Add and integrate support for FDMI) + + err = pci_enable_device(pdev); + if (err) { +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index 73fb8245c7b7..f3288c3b45cd 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -127,6 +127,7 @@ + #define fnic_clear_state_flags(fnicp, st_flags) \ + __fnic_set_state_flags(fnicp, st_flags, 1) + ++extern unsigned int fnic_fdmi_support; + extern unsigned int fnic_log_level; + extern unsigned int io_completions; + +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/15924b05.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/15924b05.failed new file mode 100644 index 0000000000000..ce8ea0a526bd8 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/15924b05.failed @@ -0,0 +1,84 @@ +scsi: fnic: Replace sgreset tag with max_tag_id + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 15924b0503630016dee4dbb945a8df4df659070b +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/15924b05.failed + +sgreset is issued with a SCSI command pointer. The device reset code +assumes that it was issued on a hardware queue, and calls block multiqueue +layer. However, the assumption is broken, and there is no hardware queue +associated with the sgreset, and this leads to a crash due to a null +pointer exception. + +Fix the code to use the max_tag_id as a tag which does not overlap with the +other tags issued by mid layer. + +Tested by running FC traffic for a few minutes, and by issuing sgreset on +the device in parallel. Without the fix, the crash is observed right away. +With this fix, no crash is observed. 
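The idea behind the fix, for reference: blk-mq only ever issues tags in the range 0..max_tag_id-1, so the value max_tag_id itself is free to identify the single driver-internal reset in flight at a time, serialized by a mutex (the sgreset_mutex visible in the hunk below). A hedged generic sketch of that scheme — illustrative helpers, not the verbatim patch:

	#include <linux/mutex.h>
	#include <linux/types.h>

	/*
	 * Sketch only: reserve the first tag past the blk-mq range for
	 * one serialized driver-internal command, so it can never
	 * collide with a tag issued by the midlayer.
	 */
	static u32 reserve_internal_tag(struct mutex *sgreset_mutex, u32 max_tag_id)
	{
		mutex_lock(sgreset_mutex);	/* one internal reset at a time */
		return max_tag_id;		/* blk-mq hands out 0..max_tag_id-1 */
	}

	static void release_internal_tag(struct mutex *sgreset_mutex)
	{
		mutex_unlock(sgreset_mutex);
	}
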
+ + Reviewed-by: Sesidhar Baddela + Tested-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20230817182146.229059-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 15924b0503630016dee4dbb945a8df4df659070b) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic.h +index 38c8614e6aed,93c68931a593..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -39,7 -27,7 +39,11 @@@ + + #define DRV_NAME "fnic" + #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +++<<<<<<< HEAD + +#define DRV_VERSION "1.6.0.55" +++======= ++ #define DRV_VERSION "1.6.0.56" +++>>>>>>> 15924b050363 (scsi: fnic: Replace sgreset tag with max_tag_id) + #define PFX DRV_NAME ": " + #define DFX DRV_NAME "%d: " + +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 0e605e372129,8ce3e3c3a882..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -2278,8 -2248,7 +2277,12 @@@ int fnic_device_reset(struct scsi_cmnd + goto fnic_device_reset_end; + } + +++<<<<<<< HEAD + + CMD_FLAGS(sc) = FNIC_DEVICE_RESET; + + /* Allocate tag if not present */ +++======= ++ fnic_priv(sc)->flags = FNIC_DEVICE_RESET; +++>>>>>>> 15924b050363 (scsi: fnic: Replace sgreset tag with max_tag_id) + + if (unlikely(tag < 0)) { + /* +@@@ -2459,11 -2429,10 +2463,10 @@@ fnic_device_reset_end + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + +- /* free tag if it is allocated */ +- if (unlikely(tag_gen_flag)) +- fnic_scsi_host_end_tag(fnic, sc); ++ if (new_sc) ++ mutex_unlock(&fnic->sgreset_mutex); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from device reset %s\n", +* Unmerged path drivers/scsi/fnic/fnic.h +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/160d6ec6.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/160d6ec6.failed new file mode 100644 index 0000000000000..1b8b3734387e9 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/160d6ec6.failed @@ -0,0 +1,56 @@ +scsi: fnic: Remove redundant flush_workqueue() calls + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Chen Ni +commit 160d6ec69f401037a9a00b9b6569082e4d0649b0 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/160d6ec6.failed + +destroy_workqueue() already drains the queue before destroying it, so +there is no need to flush it explicitly. + +Remove the redundant flush_workqueue() calls. + +This was generated with coccinelle: + +@@ +expression E; +@@ +- flush_workqueue(E); + destroy_workqueue(E); + + Signed-off-by: Chen Ni +Link: https://lore.kernel.org/r/20250312074320.1430175-1-nichen@iscas.ac.cn + Reviewed-by: Karan Tilak Kumar + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 160d6ec69f401037a9a00b9b6569082e4d0649b0) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,3dd06376e97b..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -1186,8 -1361,13 +1186,18 @@@ static void __exit fnic_cleanup_module( + { + pci_unregister_driver(&fnic_driver); + destroy_workqueue(fnic_event_queue); +++<<<<<<< HEAD + + if (fnic_fip_queue) + + destroy_workqueue(fnic_fip_queue); +++======= ++ ++ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) ++ destroy_workqueue(reset_fnic_work_queue); ++ ++ if (fnic_fip_queue) ++ destroy_workqueue(fnic_fip_queue); ++ +++>>>>>>> 160d6ec69f40 (scsi: fnic: Remove redundant flush_workqueue() calls) + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + kmem_cache_destroy(fnic_io_req_cache); +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/17789f8a.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/17789f8a.failed new file mode 100644 index 0000000000000..ebeaf93499891 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/17789f8a.failed @@ -0,0 +1,68 @@ +scsi: fnic: Delete incorrect debugfs error handling + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Dan Carpenter +commit 17789f8a5b81356fc83cf20de899fc351679574e +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/17789f8a.failed + +Debugfs functions are not supposed to require error checking and, in fact, +adding checks would normally lead to the driver refusing to load when +CONFIG_DEBUGFS is disabled. + +What saves us here is that this code checks for NULL instead of error +pointers so the error checking is all dead code. Delete it. + + Signed-off-by: Dan Carpenter +Link: https://lore.kernel.org/r/a5c237cd-449b-4f9d-bcff-6285fb7c28d1@stanley.mountain + Reviewed-by: Karan Tilak Kumar + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 17789f8a5b81356fc83cf20de899fc351679574e) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_debugfs.c +diff --cc drivers/scsi/fnic/fnic_debugfs.c +index 1a4151ef90c1,5767862ae42f..000000000000 +--- a/drivers/scsi/fnic/fnic_debugfs.c ++++ b/drivers/scsi/fnic/fnic_debugfs.c +@@@ -691,22 -681,21 +691,36 @@@ void fnic_stats_debugfs_init(struct fni + { + char name[16]; + +++<<<<<<< HEAD + + snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); + + + + fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, + + fnic_stats_debugfs_root); + + +++======= ++ snprintf(name, sizeof(name), "host%d", fnic->host->host_no); ++ ++ fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, ++ fnic_stats_debugfs_root); +++>>>>>>> 17789f8a5b81 (scsi: fnic: Delete incorrect debugfs error handling) + fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_stats_debugfs_fops); +++<<<<<<< HEAD + + +++======= +++>>>>>>> 17789f8a5b81 (scsi: fnic: Delete incorrect debugfs error handling) + fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_reset_debugfs_fops); +++<<<<<<< HEAD +++======= ++ return 0; +++>>>>>>> 17789f8a5b81 (scsi: fnic: Delete incorrect debugfs error handling) + } + + /* +* Unmerged path drivers/scsi/fnic/fnic_debugfs.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/18b5cb6f.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/18b5cb6f.failed new file mode 100644 index 0000000000000..ed7cfeebe1546 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/18b5cb6f.failed @@ -0,0 +1,44 @@ +scsi: fnic: Set appropriate logging level for log message + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 18b5cb6f1fdda4454f55a31f7c78d94da62be495 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/18b5cb6f.failed + +Replace KERN_INFO with KERN_DEBUG for a log message. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/stable/20250612002212.4144-1-kartilak%40cisco.com +Link: https://lore.kernel.org/r/20250618003431.6314-4-kartilak@cisco.com + Reviewed-by: John Meneghini + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 18b5cb6f1fdda4454f55a31f7c78d94da62be495) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 321954ca143f,75b29a018d1f..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -931,6 -1045,9 +931,12 @@@ static void fnic_fcpio_icmnd_cmpl_handl + + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) + atomic64_inc(&fnic_stats->misc_stats.queue_fulls); +++<<<<<<< HEAD +++======= ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "xfer_len: %llu", xfer_len); +++>>>>>>> 18b5cb6f1fdd (scsi: fnic: Set appropriate logging level for log message) + break; + + case FCPIO_TIMEOUT: /* request was timed out */ +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/2c770819.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/2c770819.failed new file mode 100644 index 0000000000000..b2c45ff3de747 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/2c770819.failed @@ -0,0 +1,429 @@ +scsi: fnic: Add Cisco hardware model names + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 2c77081969ee00ec31abda0cf6a26bc269f12ab2 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/2c770819.failed + +Add model IDs for Cisco VIC. + + Reviewed-by: Sesidhar Baddela +Co-developed-by: Gian Carlo Boffa + Signed-off-by: Gian Carlo Boffa +Co-developed-by: Arulprabhu Ponnusamy + Signed-off-by: Arulprabhu Ponnusamy +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-7-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 2c77081969ee00ec31abda0cf6a26bc269f12ab2) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,c2978c5c6e8f..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -88,16 -82,100 +88,90 @@@ + #define FNIC_DEV_RST_TERM_DONE BIT(20) + #define FNIC_DEV_RST_ABTS_PENDING BIT(21) + +++<<<<<<< HEAD +++======= ++ #define IS_FNIC_FCP_INITIATOR(fnic) (fnic->role == FNIC_ROLE_FCP_INITIATOR) ++ ++ /* Retry supported by rport (returned by PRLI service parameters) */ ++ #define FNIC_FC_RP_FLAGS_RETRY 0x1 ++ ++ /* Cisco vendor id */ ++ #define PCI_VENDOR_ID_CISCO 0x1137 ++ #define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */ ++ ++ /* sereno pcie switch */ ++ #define PCI_DEVICE_ID_CISCO_SERENO 0x004e ++ #define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */ ++ #define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */ ++ #define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */ ++ ++ /* Sereno */ ++ #define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */ ++ #define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */ ++ #define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */ ++ ++ /* Cruz */ ++ #define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */ ++ /* Cruz MountTian SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b ++ #define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */ ++ /* Cruz MountTian2 SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157 ++ #define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */ ++ ++ /* Bodega */ ++ /* VIC 1457 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218 ++ #define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */ ++ /* VIC 1487 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a ++ #define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */ ++ /* VIC 1440 Mezz mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215 ++ #define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */ ++ ++ /* Beverly */ ++ #define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */ ++ #define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */ ++ #define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */ ++ #define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */ ++ #define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */ ++ #define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */ ++ #define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */ ++ #define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */ ++ 
#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */ ++ ++ struct fnic_pcie_device { ++ u32 device; ++ u8 *desc; ++ u32 subsystem_device; ++ u8 *subsys_desc; ++ }; ++ +++>>>>>>> 2c77081969ee (scsi: fnic: Add Cisco hardware model names) + /* + - * fnic private data per SCSI command. + + * Usage of the scsi_cmnd scratchpad. + * These fields are locked by the hashed io_req_lock. + */ + -struct fnic_cmd_priv { + - struct fnic_io_req *io_req; + - enum fnic_ioreq_state state; + - u32 flags; + - u16 abts_status; + - u16 lr_status; + -}; + - + -static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) + -{ + - return scsi_cmd_priv(cmd); + -} + - + -static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) + -{ + - struct fnic_cmd_priv *fcmd = fnic_priv(cmd); + - + - return ((u64)fcmd->flags << 32) | fcmd->state; + -} + +#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) + +#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) + +#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) + +#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) + +#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) + +#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) + + #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +@@@ -299,8 -403,15 +373,11 @@@ struct fnic + struct work_struct frame_work; + struct work_struct flush_work; + struct sk_buff_head frame_queue; + - struct list_head tx_queue; + - mempool_t *frame_pool; + - mempool_t *frame_elem_pool; + - struct work_struct tport_work; + - struct list_head tport_event_list; + + struct sk_buff_head tx_queue; + ++ char subsys_desc[14]; ++ int subsys_desc_len; ++ + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); + struct work_struct fip_frame_work; +@@@ -398,4 -511,76 +475,79 @@@ fnic_chk_state_flags_locked(struct fni + } + void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); + void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +++<<<<<<< HEAD +++======= ++ void fnic_free_txq(struct list_head *head); ++ int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, ++ char **subsys_desc); ++ ++ struct fnic_scsi_iter_data { ++ struct fnic *fnic; ++ void *data1; ++ void *data2; ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2); ++ }; ++ ++ static inline bool ++ fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) ++ { ++ struct fnic_scsi_iter_data *iter = iter_data; ++ ++ return iter->fn(iter->fnic, sc, iter->data1, iter->data2); ++ } ++ ++ static inline void ++ fnic_scsi_io_iter(struct fnic *fnic, ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2), ++ void *data1, void *data2) ++ { ++ struct fnic_scsi_iter_data iter_data = { ++ .fn = fn, ++ .fnic = fnic, ++ .data1 = data1, ++ .data2 = data2, ++ }; ++ scsi_host_busy_iter(fnic->lport->host, fnic_io_iter_handler, &iter_data); ++ } ++ ++ #ifdef FNIC_DEBUG ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) ++ { ++ int i; ++ ++ for (i = 0; i < len; i = i+8) { ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, ++ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], ++ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); ++ } ++ } ++ ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ int len, char *pfx) ++ { ++ uint32_t s_id, d_id; ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = 
ntoh24(fchdr->fh_d_id); ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", ++ pfx, s_id, d_id, fchdr->fh_type, ++ FNIC_STD_GET_OX_ID(fchdr), len); ++ ++ fnic_debug_dump(fnic, (uint8_t *)fchdr, len); ++ ++ } ++ #else /* FNIC_DEBUG */ ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {} ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ uint32_t len, char *pfx) {} ++ #endif /* FNIC_DEBUG */ +++>>>>>>> 2c77081969ee (scsi: fnic: Add Cisco hardware model names) + #endif /* _FNIC_H_ */ +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,c1c10731906f..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -601,6 -610,8 +601,11 @@@ static int fnic_probe(struct pci_dev *p + int fnic_id = 0; + int i; + unsigned long flags; +++<<<<<<< HEAD +++======= ++ int hwq; ++ char *desc, *subsys_desc; +++>>>>>>> 2c77081969ee (scsi: fnic: Add Cisco hardware model names) + + /* + * Allocate SCSI Host and set up association between host, +@@@ -634,10 -645,14 +639,21 @@@ + fnic->fnic_num = fnic_id; + fnic_stats_debugfs_init(fnic); + +++<<<<<<< HEAD + + /* Setup PCI resources */ + + pci_set_drvdata(pdev, fnic); + + + + fnic->pdev = pdev; +++======= ++ /* Find model name from PCIe subsys ID */ ++ if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) ++ dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc); ++ else { ++ fnic->subsys_desc_len = 0; ++ dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown", ++ pdev->subsystem_device); ++ } +++>>>>>>> 2c77081969ee (scsi: fnic: Add Cisco hardware model names) + + err = pci_enable_device(pdev); + if (err) { +diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile +index 6214a6b2e96d..9d7083fce420 100644 +--- a/drivers/scsi/fnic/Makefile ++++ b/drivers/scsi/fnic/Makefile +@@ -15,4 +15,5 @@ fnic-y := \ + vnic_intr.o \ + vnic_rq.o \ + vnic_wq_copy.o \ +- vnic_wq.o ++ vnic_wq.o \ ++ fnic_pci_subsys_devid.o +* Unmerged path drivers/scsi/fnic/fnic.h +* Unmerged path drivers/scsi/fnic/fnic_main.c +diff --git a/drivers/scsi/fnic/fnic_pci_subsys_devid.c b/drivers/scsi/fnic/fnic_pci_subsys_devid.c +new file mode 100644 +index 000000000000..36a2c1268422 +--- /dev/null ++++ b/drivers/scsi/fnic/fnic_pci_subsys_devid.c +@@ -0,0 +1,131 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright 2008 Cisco Systems, Inc. All rights reserved. ++ * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "fnic.h" ++ ++static struct fnic_pcie_device fnic_pcie_device_table[] = { ++ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA, ++ "VIC 1280"}, ++ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI, ++ "VIC 1240"}, ++ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", ++ PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"}, ++ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE, ++ "VIC 1285"}, ++ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", ++ PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"}, ++ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", ++ PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"}, ++ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE, ++ "VIC 1227T"}, ++ ++ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA, ++ "VIC 1340"}, ++ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW, ++ "VIC 1380"}, ++ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN, ++ "C3260-SIOC"}, ++ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE, ++ "VIC 1385"}, ++ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2, ++ "C3260-SIOC"}, ++ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT, ++ "VIC 1387"}, ++ ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY, ++ "VIC 1457"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", ++ PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", ++ PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE, ++ "VIC 1485"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", ++ PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", ++ PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA, ++ "VIC 1495"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT, ++ "VIC 1497"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE, ++ "VIC 1467"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON, ++ "VIC 1477"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", ++ PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"}, ++ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", ++ PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"}, ++ ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN, ++ "VIC 15420"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", ++ PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW, ++ "VIC 15411"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", ++ PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU, ++ "VIC 15238"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA, ++ "VIC 15422"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", ++ PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", ++ PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", ++ PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH, ++ "VIC 15230"}, ++ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA, ++ "VIC 15427"}, ++ ++ 
{0,} ++}; ++ ++int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, ++ char **subsys_desc) ++{ ++ unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC; ++ int max = ARRAY_SIZE(fnic_pcie_device_table); ++ struct fnic_pcie_device *t = fnic_pcie_device_table; ++ int index = 0; ++ ++ if (pdev->device != device) ++ return 1; ++ ++ while (t->device != 0) { ++ if (memcmp ++ ((char *) &pdev->subsystem_device, ++ (char *) &t->subsystem_device, sizeof(short)) == 0) ++ break; ++ t++; ++ index++; ++ } ++ ++ if (index >= max - 1) { ++ *desc = NULL; ++ *subsys_desc = NULL; ++ return 1; ++ } ++ ++ *desc = fnic_pcie_device_table[index].desc; ++ *subsys_desc = fnic_pcie_device_table[index].subsys_desc; ++ return 0; ++} diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/38945c2b.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/38945c2b.failed new file mode 100644 index 0000000000000..f6b28c1be5d29 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/38945c2b.failed @@ -0,0 +1,63 @@ +scsi: fnic: unlock on error path in fnic_queuecommand() + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Dan Carpenter +commit 38945c2b006b23a1a7a0c88d76e3294c6199891c +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/38945c2b.failed + +Call spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags) before +returning. + +Fixes: c81df08cd294 ("scsi: fnic: Add support for multiqueue (MQ) in fnic driver") + Signed-off-by: Dan Carpenter +Link: https://lore.kernel.org/r/5360fa20-74bc-4c22-a78e-ea8b18c5410d@moroto.mountain + Reviewed-by: Karan Tilak Kumar + Signed-off-by: Martin K. Petersen +(cherry picked from commit 38945c2b006b23a1a7a0c88d76e3294c6199891c) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic_scsi.c +index a9f65dc3f089,8d7fc5284293..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -557,14 -538,25 +557,31 @@@ static int fnic_queuecommand_lck(struc + io_lock_acquired = 1; + io_req->port_id = rport->port_id; + io_req->start_time = jiffies; +++<<<<<<< HEAD + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_SP(sc) = (char *)io_req; + + CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED; + + sc->scsi_done = done; +++======= ++ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; ++ fnic_priv(sc)->io_req = io_req; ++ fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; ++ io_req->sc = sc; ++ ++ if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) { ++ WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n", ++ fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag)); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } ++ ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req; ++ io_req->tag = mqtag; +++>>>>>>> 38945c2b006b (scsi: fnic: unlock on error path in fnic_queuecommand()) + + /* create copy wq desc and enqueue it */ + - wq = &fnic->hw_copy_wq[hwq]; + - atomic64_inc(&fnic_stats->io_stats.ios[hwq]); + - ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq); + + wq = &fnic->hw_copy_wq[0]; + + ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); + if (ret) { + /* + * In case another thread cancelled the request, +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git 
a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3986001c.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3986001c.failed new file mode 100644 index 0000000000000..91884cf095aa8 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3986001c.failed @@ -0,0 +1,165 @@ +scsi: fnic: Return appropriate error code from failure of scsi drv init + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 3986001ca11ec630d631467d788aac513c61cb52 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3986001c.failed + +Return appropriate error code from fnic_probe caused by failure of +fnic_scsi_drv_init. Fix bug report. + + Suggested-by: Dan Carpenter + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250110091842.17711-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 3986001ca11ec630d631467d788aac513c61cb52) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,9ed42d5819d5..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -854,58 -967,90 +854,85 @@@ static int fnic_probe(struct pci_dev *p + } + } + + - init_completion(&fnic->reset_completion_wait); + + /* + + * Initialization done with PCI system, hardware, firmware. + + * Add host to SCSI + + */ + + err = scsi_add_host(lp->host, &pdev->dev); + + if (err) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "fnic: scsi_add_host failed...exiting\n"); + + goto err_out_free_rq_buf; + + } + + - /* Start local port initialization */ + - iport->max_flogi_retries = fnic->config.flogi_retries; + - iport->max_plogi_retries = fnic->config.plogi_retries; + - iport->plogi_timeout = fnic->config.plogi_timeout; + - iport->service_params = + - (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS | + - FNIC_FCP_SP_CONF_CMPL); + - if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + - iport->service_params |= FNIC_FCP_SP_RETRY; + + /* Start local port initiatialization */ + + - iport->boot_time = jiffies; + - iport->e_d_tov = fnic->config.ed_tov; + - iport->r_a_tov = fnic->config.ra_tov; + - iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT; + - iport->wwpn = fnic->config.port_wwn; + - iport->wwnn = fnic->config.node_wwn; + + lp->link_up = 0; + + - iport->max_payload_size = fnic->config.maxdatafieldsize; + + lp->max_retry_count = fnic->config.flogi_retries; + + lp->max_rport_retry_count = fnic->config.plogi_retries; + + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + + FCP_SPPF_CONF_COMPL); + + if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + + lp->service_params |= FCP_SPPF_RETRY; + + - if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) || + - (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) || + - ((iport->max_payload_size % 4) != 0)) { + - iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN; + - } + + lp->boot_time = jiffies; + + lp->e_d_tov = fnic->config.ed_tov; + + lp->r_a_tov = fnic->config.ra_tov; + + lp->link_supported_speeds = FC_PORTSPEED_10GBIT; + + fc_set_wwnn(lp, fnic->config.node_wwn); + + fc_set_wwpn(lp, fnic->config.port_wwn); + + - iport->flags |= FNIC_FIRST_LINK_UP; + + fcoe_libfc_config(lp, &fnic->ctlr, 
&fnic_transport_template, 0); + + - timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback, + - 0); + + if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, + + FCPIO_HOST_EXCH_RANGE_END, NULL)) { + + err = -ENOMEM; + + goto err_out_remove_scsi_host; + + } + + + fc_lport_init_stats(lp); + fnic->stats_reset_time = jiffies; + + - INIT_WORK(&fnic->link_work, fnic_handle_link); + - INIT_WORK(&fnic->frame_work, fnic_handle_frame); + - INIT_WORK(&fnic->tport_work, fnic_tport_event_handler); + - INIT_WORK(&fnic->flush_work, fnic_flush_tx); + - + - INIT_LIST_HEAD(&fnic->frame_queue); + - INIT_LIST_HEAD(&fnic->tx_queue); + - INIT_LIST_HEAD(&fnic->tport_event_list); + + fc_lport_config(lp); + + - INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry, + - fdls_schedule_oxid_free_retry_work); + - + - /* Initialize the oxid reclaim list and work struct */ + - INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list); + - INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler); + - + - /* Enable all queues */ + - for (i = 0; i < fnic->raw_wq_count; i++) + - vnic_wq_enable(&fnic->wq[i]); + - for (i = 0; i < fnic->rq_count; i++) { + - if (!ioread32(&fnic->rq[i].ctrl->enable)) + - vnic_rq_enable(&fnic->rq[i]); + + if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + + + sizeof(struct fc_frame_header))) { + + err = -EINVAL; + + goto err_out_free_exch_mgr; + } + - for (i = 0; i < fnic->wq_copy_count; i++) + - vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); + + fc_host_maxframe_size(lp->host) = lp->mfs; + + fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; + +++<<<<<<< HEAD + + sprintf(fc_host_symbolic_name(lp->host), + + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); +++======= ++ vnic_dev_enable(fnic->vdev); ++ ++ err = fnic_request_intr(fnic); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "Unable to request irq.\n"); ++ goto err_out_fnic_request_intr; ++ } ++ ++ fnic_notify_timer_start(fnic); ++ ++ fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE)); ++ ++ err = fnic_scsi_drv_init(fnic); ++ if (err) ++ goto err_out_scsi_drv_init; ++ ++ err = fnic_stats_debugfs_init(fnic); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n"); ++ goto err_out_free_stats_debugfs; ++ } ++ ++ for (i = 0; i < fnic->intr_count; i++) ++ vnic_intr_unmask(&fnic->intr[i]); +++>>>>>>> 3986001ca11e (scsi: fnic: Return appropriate error code from failure of scsi drv init) + + spin_lock_irqsave(&fnic_list_lock, flags); + list_add_tail(&fnic->list, &fnic_list); +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3df9dd0d.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3df9dd0d.failed new file mode 100644 index 0000000000000..3b1374ce38052 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3df9dd0d.failed @@ -0,0 +1,120 @@ +scsi: fnic: Add and improve log messages + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 3df9dd0d51c2e4b0c4a400f8ce94308a2d93ef61 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/3df9dd0d.failed + +Add link related log messages in fnic_fcs.c, +Improve log message in fnic_fcs.c, +Add log message in vnic_dev.c. 
+ + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20231211173617.932990-4-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 3df9dd0d51c2e4b0c4a400f8ce94308a2d93ef61) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_fcs.c +diff --cc drivers/scsi/fnic/fnic_fcs.c +index 1885218f9d15,5e312a55cc7d..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -157,16 -149,18 +161,22 @@@ void fnic_handle_link(struct work_struc + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + - fnic_fc_trace_set_data(fnic->lport->host->host_no, + - FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + - strlen("Link Status: DOWN_UP_VLAN")); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, + + FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + + strlen("Link Status: DOWN_UP_VLAN")); + fnic_fcoe_send_vlan_req(fnic); + - + return; + } +++<<<<<<< HEAD + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); +++======= ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "down->up: Link up\n"); +++>>>>>>> 3df9dd0d51c2 (scsi: fnic: Add and improve log messages) + fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, + - "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); + + "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); + fcoe_ctlr_link_up(&fnic->ctlr); + } else { + /* UP -> DOWN */ +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index 32748e3ba0be..1d48ff1c85bf 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -143,13 +143,17 @@ do { \ + } while (0); \ + } while (0) + +-#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \ ++#define FNIC_MAIN_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \ +- shost_printk(kern_level, host, fmt, ##args);) ++ shost_printk(kern_level, host, \ ++ "fnic<%d>: %s: %d: " fmt, fnic_num,\ ++ __func__, __LINE__, ##args);) + +-#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \ ++#define FNIC_FCS_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ +- shost_printk(kern_level, host, fmt, ##args);) ++ shost_printk(kern_level, host, \ ++ "fnic<%d>: %s: %d: " fmt, fnic_num,\ ++ __func__, __LINE__, ##args);) + + #define FNIC_SCSI_DBG(kern_level, host, fmt, args...) 
\ + FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ +* Unmerged path drivers/scsi/fnic/fnic_fcs.c +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c +index 3bd3865cdf63..10dfda854219 100644 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@ -221,7 +221,7 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { +- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "fnic: Get vnic stats failed" + " 0x%x", ret); + return stats; +@@ -333,7 +333,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host) + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { +- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "fnic: Reset vnic stats failed" + " 0x%x", ret); + return; +diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c +index 5988c300cc82..f95f5d692bcc 100644 +--- a/drivers/scsi/fnic/vnic_dev.c ++++ b/drivers/scsi/fnic/vnic_dev.c +@@ -155,6 +155,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, + vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; + } + ++ pr_info("res_type_wq: %d res_type_rq: %d res_type_cq: %d res_type_intr_ctrl: %d\n", ++ vdev->res[RES_TYPE_WQ].count, vdev->res[RES_TYPE_RQ].count, ++ vdev->res[RES_TYPE_CQ].count, vdev->res[RES_TYPE_INTR_CTRL].count); ++ + return 0; + } + diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/514f0c40.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/514f0c40.failed new file mode 100644 index 0000000000000..d0e9d1ee4abe3 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/514f0c40.failed @@ -0,0 +1,179 @@ +scsi: fnic: Fix sg_reset success path + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 514f0c400bde6b62405467daaf2a0a86bcf7794b +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/514f0c40.failed + +sg_reset performs a target or LUN reset. Since the command is issued by the +user, it does not come into the driver with a tag or a queue id. Fix the +fnic driver to create an io_req and use a SCSI command tag. Fix the ITMF +path to special case the sg_reset response. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Tested-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20230919182436.6895-1-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 514f0c400bde6b62405467daaf2a0a86bcf7794b) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic.h +index 38c8614e6aed,22cef283b2b9..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -39,7 -27,7 +39,11 @@@ + + #define DRV_NAME "fnic" + #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +++<<<<<<< HEAD + +#define DRV_VERSION "1.6.0.55" +++======= ++ #define DRV_VERSION "1.6.0.57" +++>>>>>>> 514f0c400bde (scsi: fnic: Fix sg_reset success path) + #define PFX DRV_NAME ": " + #define DFX DRV_NAME "%d: " + +@@@ -235,6 -236,9 +239,12 @@@ struct fnic + unsigned int wq_count; + unsigned int cq_count; + +++<<<<<<< HEAD +++======= ++ struct mutex sgreset_mutex; ++ spinlock_t sgreset_lock; /* lock for sgreset */ ++ struct scsi_cmnd *sgreset_sc; +++>>>>>>> 514f0c400bde (scsi: fnic: Fix sg_reset success path) + struct dentry *fnic_stats_debugfs_host; + struct dentry *fnic_stats_debugfs_file; + struct dentry *fnic_reset_debugfs_file; +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 19e3244e4ce8,416d81954819..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -1090,12 -1091,12 +1106,12 @@@ static void fnic_fcpio_itmf_cmpl_handle + atomic64_inc(&fnic_stats->io_stats.sc_null); + shost_printk(KERN_ERR, fnic->lport->host, + "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", +- fnic_fcpio_status_to_str(hdr_status), id); ++ fnic_fcpio_status_to_str(hdr_status), tag); + return; + } +- io_lock = fnic_io_lock_hash(fnic, sc); ++ + spin_lock_irqsave(io_lock, flags); + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + WARN_ON_ONCE(!io_req); + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); +@@@ -2250,18 -2241,25 +2270,24 @@@ int fnic_device_reset(struct scsi_cmnd + + if (unlikely(tag < 0)) { + /* + - * For device reset issued through sg3utils, we let + - * only one LUN_RESET to go through and use a special + - * tag equal to max_tag_id so that we don't have to allocate + - * or free it. It won't interact with tags + - * allocated by mid layer. + + * Really should fix the midlayer to pass in a proper + + * request for ioctls... 
+ */ + - mutex_lock(&fnic->sgreset_mutex); + - tag = fnic->fnic_max_tag_id; + + tag = fnic_scsi_host_start_tag(fnic, sc); + + if (unlikely(tag == SCSI_NO_TAG)) + + goto fnic_device_reset_end; + + tag_gen_flag = 1; + new_sc = 1; +- } +- io_lock = fnic_io_lock_hash(fnic, sc); ++ fnic->sgreset_sc = sc; ++ io_lock = &fnic->sgreset_lock; ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, ++ "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n", ++ rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag); ++ } else ++ io_lock = fnic_io_lock_hash(fnic, sc); ++ + spin_lock_irqsave(io_lock, flags); + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + /* + * If there is a io_req attached to this command, then use it, +@@@ -2275,11 -2273,13 +2301,17 @@@ + } + memset(io_req, 0, sizeof(*io_req)); + io_req->port_id = rport->port_id; +++<<<<<<< HEAD + + CMD_SP(sc) = (char *)io_req; +++======= ++ io_req->tag = tag; ++ io_req->sc = sc; ++ fnic_priv(sc)->io_req = io_req; +++>>>>>>> 514f0c400bde (scsi: fnic: Fix sg_reset success path) + } + io_req->dr_done = &tm_done; + - fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + - fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); +@@@ -2426,11 -2426,12 +2458,18 @@@ fnic_device_reset_end + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + +++<<<<<<< HEAD + + /* free tag if it is allocated */ + + if (unlikely(tag_gen_flag)) + + fnic_scsi_host_end_tag(fnic, sc); +++======= ++ if (new_sc) { ++ fnic->sgreset_sc = NULL; ++ mutex_unlock(&fnic->sgreset_mutex); ++ } +++>>>>>>> 514f0c400bde (scsi: fnic: Fix sg_reset success path) + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Returning from device reset %s\n", +* Unmerged path drivers/scsi/fnic/fnic.h +diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h +index 1cb6a68c8e4e..5c415d8c9502 100644 +--- a/drivers/scsi/fnic/fnic_io.h ++++ b/drivers/scsi/fnic/fnic_io.h +@@ -64,6 +64,8 @@ struct fnic_io_req { + unsigned long start_time; /* in jiffies */ + struct completion *abts_done; /* completion for abts */ + struct completion *dr_done; /* completion for device reset */ ++ unsigned int tag; ++ struct scsi_cmnd *sc; /* midlayer's cmd pointer */ + }; + + enum fnic_port_speeds { +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c +index bd44974d269f..d50fea80a978 100644 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@ -765,6 +765,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + for (i = 0; i < FNIC_IO_LOCKS; i++) + spin_lock_init(&fnic->io_req_lock[i]); + ++ spin_lock_init(&fnic->sgreset_lock); ++ + err = -ENOMEM; + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/52f6e196.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/52f6e196.failed new file mode 100644 index 0000000000000..3668334774e35 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/52f6e196.failed @@ -0,0 +1,69 @@ +scsi: fnic: Add support for multiqueue (MQ) in fnic_main.c + +jira LE-4311 +Rebuild_History 
Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 52f6e196e52ef834f928aac297d895f4c32276ea +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/52f6e196.failed + +Set map_queues in the fnic_host_template to fnic_mq_map_queues_cpus. +Define fnic_mq_map_queues_cpus to set cpu assignment to fnic queues. +Refactor code in fnic_probe to enable vnic queues before scsi_add_host. +Modify notify set to the correct index. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Hannes Reinecke + Reviewed-by: John Garry + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20231211173617.932990-11-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 52f6e196e52ef834f928aac297d895f4c32276ea) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 85e3a51a16fc,7fee8a024edb..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -123,8 -113,10 +125,13 @@@ static struct scsi_host_template fnic_h + .can_queue = FNIC_DFLT_IO_REQ, + .sg_tablesize = FNIC_MAX_SG_DESC_CNT, + .max_sectors = 0xffff, + - .shost_groups = fnic_host_groups, + + .shost_attrs = fnic_attrs, + .track_queue_depth = 1, +++<<<<<<< HEAD +++======= ++ .cmd_size = sizeof(struct fnic_cmd_priv), ++ .map_queues = fnic_mq_map_queues_cpus, +++>>>>>>> 52f6e196e52e (scsi: fnic: Add support for multiqueue (MQ) in fnic_main.c) + }; + + static void +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index 9ebca3720f82..243f976cebb5 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -108,7 +108,7 @@ + #define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */ + + #define FNIC_MAX_FCP_TARGET 256 +- ++#define FNIC_PCI_OFFSET 2 + /** + * state_flags to identify host state along along with fnic's state + **/ +@@ -378,7 +378,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, + int fnic_fw_reset_handler(struct fnic *fnic); + void fnic_terminate_rport_io(struct fc_rport *); + const char *fnic_state_to_str(unsigned int state); +- ++void fnic_mq_map_queues_cpus(struct Scsi_Host *host); + void fnic_log_q_error(struct fnic *fnic); + void fnic_handle_link_event(struct fnic *fnic); + +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/53021c19.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/53021c19.failed new file mode 100644 index 0000000000000..5212e345be495 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/53021c19.failed @@ -0,0 +1,39 @@ +scsi: fnic: Increment driver version + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 53021c192cc55074eee744cb41dcdfb9318d1f80 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/53021c19.failed + +Increment driver version for multiqueue (MQ). + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20231211173617.932990-14-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 53021c192cc55074eee744cb41dcdfb9318d1f80) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic.h +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,2074937c05bc..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -39,7 -27,7 +39,11 @@@ + + #define DRV_NAME "fnic" + #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +++<<<<<<< HEAD + +#define DRV_VERSION "1.6.0.55" +++======= ++ #define DRV_VERSION "1.7.0.0" +++>>>>>>> 53021c192cc5 (scsi: fnic: Increment driver version) + #define PFX DRV_NAME ": " + #define DFX DRV_NAME "%d: " + +* Unmerged path drivers/scsi/fnic/fnic.h diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/54428671.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/54428671.failed new file mode 100644 index 0000000000000..8e8c9d754323f --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/54428671.failed @@ -0,0 +1,193 @@ +scsi: fnic: Test for memory allocation failure and return error code + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 54428671aac88dd11074c47cb7e7726e41d40f4a +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/54428671.failed + +Fix kernel test robot warning. Test for memory allocation failure, and +free memory for queues allocated in a multiqueue and non-multiqueue +scenario. Return appropriate error code. + + Reported-by: kernel test robot +Closes: https://lore.kernel.org/r/202412312347.FE4ZgEoM-lkp@intel.com/ + Reported-by: Julia Lawall +Closes: https://lore.kernel.org/r/202412312347.FE4ZgEoM-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250110091924.17729-1-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 54428671aac88dd11074c47cb7e7726e41d40f4a) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,f8fe67bebf3e..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -558,9 -572,31 +558,30 @@@ static void fnic_set_vlan(struct fnic * + vnic_dev_set_default_vlan(fnic->vdev, vlan_id); + } + +++<<<<<<< HEAD +++======= ++ static void fnic_scsi_init(struct fnic *fnic) ++ { ++ struct Scsi_Host *host = fnic->host; ++ ++ snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, ++ host->host_no); ++ ++ host->transportt = fnic_fc_transport; ++ } ++ ++ static void fnic_free_ioreq_tables_mq(struct fnic *fnic) ++ { ++ int hwq; ++ ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) ++ kfree(fnic->sw_copy_wq[hwq].io_req_table); ++ } ++ +++>>>>>>> 54428671aac8 (scsi: fnic: Test for memory allocation failure and return error code) + static int fnic_scsi_drv_init(struct fnic *fnic) + { + - struct Scsi_Host *host = fnic->host; + - int err; + - struct pci_dev *pdev = fnic->pdev; + - struct fnic_iport_s *iport = &fnic->iport; + - int hwq; + + struct Scsi_Host *host = fnic->lport->host; + + /* Configure maximum outstanding IO reqs*/ + if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) +@@@ -571,23 -607,71 +592,81 @@@ + fnic->fnic_max_tag_id = host->can_queue; + host->max_lun = fnic->config.luns_per_tgt; + host->max_id = FNIC_MAX_FCP_TARGET; + - host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN; + + host->max_cmd_len = FCOE_MAX_CMD_LEN; + + host->nr_hw_queues = fnic->wq_copy_count; + + if (host->nr_hw_queues > 1) + + shost_printk(KERN_ERR, host, + + "fnic: blk-mq is not supported"); + + + + host->nr_hw_queues = fnic->wq_copy_count = 1; + + - dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", + + shost_printk(KERN_INFO, host, + + "fnic: can_queue: %d max_lun: %llu", + host->can_queue, host->max_lun); + + - dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + + shost_printk(KERN_INFO, host, + + "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + host->max_id, host->max_cmd_len, host->nr_hw_queues); + +++<<<<<<< HEAD +++======= ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { ++ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; ++ fnic->sw_copy_wq[hwq].io_req_table = ++ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * ++ sizeof(struct fnic_io_req *), GFP_KERNEL); ++ ++ if (!fnic->sw_copy_wq[hwq].io_req_table) { ++ fnic_free_ioreq_tables_mq(fnic); ++ return -ENOMEM; ++ } ++ } ++ ++ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", ++ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); ++ ++ fnic_scsi_init(fnic); ++ ++ err = scsi_add_host(fnic->host, &pdev->dev); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n"); ++ return -1; ++ } ++ fc_host_maxframe_size(fnic->host) = iport->max_payload_size; ++ fc_host_dev_loss_tmo(fnic->host) = ++ fnic->config.port_down_timeout / 1000; ++ sprintf(fc_host_symbolic_name(fnic->host), ++ DRV_NAME " v" DRV_VERSION " over %s", fnic->name); ++ fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT; ++ fc_host_node_name(fnic->host) = iport->wwnn; ++ fc_host_port_name(fnic->host) = iport->wwpn; ++ fc_host_supported_classes(fnic->host) = FC_COS_CLASS3; ++ memset(fc_host_supported_fc4s(fnic->host), 0, ++ sizeof(fc_host_supported_fc4s(fnic->host))); ++ 
fc_host_supported_fc4s(fnic->host)[2] = 1; ++ fc_host_supported_fc4s(fnic->host)[7] = 1; ++ fc_host_supported_speeds(fnic->host) = 0; ++ fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT; ++ ++ dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data); ++ if (fnic->host->shost_data != NULL) { ++ if (fnic_tgt_id_binding == 0) { ++ dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n"); ++ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE; ++ } else { ++ dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n"); ++ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN; ++ } ++ } ++ ++ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); ++ if (!fnic->io_req_pool) { ++ scsi_remove_host(fnic->host); ++ return -ENOMEM; ++ } ++ +++>>>>>>> 54428671aac8 (scsi: fnic: Test for memory allocation failure and return error code) + return 0; + } + +@@@ -941,16 -1071,25 +1020,30 @@@ static int fnic_probe(struct pci_dev *p + + return 0; + +++<<<<<<< HEAD + +err_out_free_exch_mgr: + + fc_exch_mgr_free(lp); + +err_out_remove_scsi_host: + + fc_remove_host(lp->host); + + scsi_remove_host(lp->host); + +err_out_free_rq_buf: + + for (i = 0; i < fnic->rq_count; i++) +++======= ++ err_out_free_stats_debugfs: ++ fnic_stats_debugfs_remove(fnic); ++ fnic_free_ioreq_tables_mq(fnic); ++ scsi_remove_host(fnic->host); ++ err_out_scsi_drv_init: ++ fnic_free_intr(fnic); ++ err_out_fnic_request_intr: ++ err_out_alloc_rq_buf: ++ for (i = 0; i < fnic->rq_count; i++) { ++ if (ioread32(&fnic->rq[i].ctrl->enable)) ++ vnic_rq_disable(&fnic->rq[i]); +++>>>>>>> 54428671aac8 (scsi: fnic: Test for memory allocation failure and return error code) + vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + - } + vnic_dev_notify_unset(fnic->vdev); + -err_out_fnic_notify_set: + - mempool_destroy(fnic->frame_elem_pool); + -err_out_fdls_frame_elem_pool: + - mempool_destroy(fnic->frame_pool); + -err_out_fdls_frame_pool: + +err_out_free_max_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); + err_out_free_dflt_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/55cf7152.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/55cf7152.failed new file mode 100644 index 0000000000000..a62395f16b8cb --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/55cf7152.failed @@ -0,0 +1,991 @@ +scsi: fnic: Improve logs and add support for multiqueue (MQ) + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 55cf715244a7dfda42191445d97628e837158091 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/55cf7152.failed + +Improve existing logs by adding fnic number, hardware queue, tag, and mqtag +in the prints. Add logs with the above elements for effective debugging. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Tested-by: Karan Tilak Kumar + Reviewed-by: Hannes Reinecke + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20231211173617.932990-13-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 55cf715244a7dfda42191445d97628e837158091) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic_scsi.c +index a9f65dc3f089,4d6db4509e75..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -366,14 -338,11 +366,18 @@@ static inline int fnic_queue_wq_copy_de + int_to_scsilun(sc->device->lun, &fc_lun); + + /* Enqueue the descriptor in the Copy WQ */ + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if (unlikely(!vnic_wq_copy_desc_avail(wq))) { +++<<<<<<< HEAD + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + "fnic_queue_wq_copy_desc failure - no descriptors\n"); + atomic64_inc(&misc_stats->io_cpwq_alloc_failures); + return SCSI_MLQUEUE_HOST_BUSY; +@@@ -434,19 -399,33 +438,43 @@@ static int fnic_queuecommand_lck(struc + int sg_count = 0; + unsigned long flags = 0; + unsigned long ptr; + + spinlock_t *io_lock = NULL; + int io_lock_acquired = 0; + struct fc_rport_libfc_priv *rp; + - uint16_t hwq = 0; + +++<<<<<<< HEAD + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) +++======= ++ mqtag = blk_mq_unique_tag(rq); ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", ++ fnic->state_flags); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + return SCSI_MLQUEUE_HOST_BUSY; + - } + +++<<<<<<< HEAD + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) +++======= ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "fnic flags: 0x%lx. 
Returning SCSI_MLQUEUE_HOST_BUSY\n", ++ fnic->state_flags); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + return SCSI_MLQUEUE_HOST_BUSY; + - } + + rport = starget_to_rport(scsi_target(sc->device)); + if (!rport) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + "returning DID_NO_CONNECT for IO as rport is NULL\n"); + sc->result = DID_NO_CONNECT << 16; + done(sc); +@@@ -455,7 -434,8 +483,12 @@@ + + ret = fc_remote_port_chkready(rport); + if (ret) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + "rport is not ready\n"); + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = ret; +@@@ -465,7 -445,8 +498,12 @@@ + + rp = rport->dd_data; + if (!rp || rp->rp_state == RPORT_ST_DELETE) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + "rport 0x%x removed, returning DID_NO_CONNECT\n", + rport->port_id); + +@@@ -476,7 -457,8 +514,12 @@@ + } + + if (rp->rp_state != RPORT_ST_READY) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", + rport->port_id, rp->rp_state); + +@@@ -485,8 -467,13 +528,16 @@@ + return 0; + } + +++<<<<<<< HEAD + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) +++======= ++ if (lp->state != LPORT_ST_READY || !(lp->link_up)) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "state not ready: %d/link not up: %d Returning HOST_BUSY\n", ++ lp->state, lp->link_up); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + return SCSI_MLQUEUE_HOST_BUSY; + - } + + atomic_inc(&fnic->in_flight); + +@@@ -833,18 -818,38 +882,41 @@@ static void fnic_fcpio_icmnd_cmpl_handl + u64 cmd_trace; + unsigned long start_time; + unsigned long io_duration_time; + - unsigned int hwq = 0; + - unsigned int mqtag = 0; + - unsigned int tag = 0; + + /* Decode the cmpl description to get the io_req id */ + - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + - fcpio_tag_id_dec(&ftag, &id); + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + fcpio_tag_id_dec(&tag, &id); + icmnd_cmpl = &desc->u.icmnd_cmpl; + +++<<<<<<< HEAD + + if (id >= fnic->fnic_max_tag_id) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "Tag out of range tag %x hdr status = %s\n", + + id, fnic_fcpio_status_to_str(hdr_status)); +++======= ++ mqtag = id; ++ tag = blk_mq_unique_tag_to_tag(mqtag); ++ hwq = blk_mq_unique_tag_to_hwq(mqtag); ++ ++ if (hwq != cq_index) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, 
tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hdr status: %s icmnd completion on the wrong queue\n", ++ fnic_fcpio_status_to_str(hdr_status)); ++ } ++ ++ if (tag >= fnic->fnic_max_tag_id) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hdr status: %s Out of range tag\n", ++ fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + return; + } + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + sc = scsi_host_find_tag(fnic->lport->host, id); + WARN_ON_ONCE(!sc); +@@@ -894,13 -905,13 +966,13 @@@ + * set the FNIC_IO_DONE so that this doesn't get + * flagged as 'out of order' if it was not aborted + */ + - fnic_priv(sc)->flags |= FNIC_IO_DONE; + - fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_FLAGS(sc) |= FNIC_IO_DONE; + + CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING; + + spin_unlock_irqrestore(io_lock, flags); + if(FCPIO_ABORTED == hdr_status) + - fnic_priv(sc)->flags |= FNIC_IO_ABORTED; + + CMD_FLAGS(sc) |= FNIC_IO_ABORTED; + +- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + "icmnd_cmpl abts pending " + "hdr status = %s tag = 0x%x sc = 0x%p " + "scsi_status = %x residual = %d\n", +@@@ -1071,16 -1083,42 +1143,47 @@@ static void fnic_fcpio_itmf_cmpl_handle + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + unsigned long flags; + + spinlock_t *io_lock; + unsigned long start_time; + - unsigned int hwq = cq_index; + - unsigned int mqtag; + - unsigned int tag; + + - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + - fcpio_tag_id_dec(&ftag, &id); + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + fcpio_tag_id_dec(&tag, &id); + +++<<<<<<< HEAD + + if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "Tag out of range tag %x hdr status = %s\n", + + id, fnic_fcpio_status_to_str(hdr_status)); +++======= ++ mqtag = id & FNIC_TAG_MASK; ++ tag = blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK); ++ hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK); ++ ++ if (hwq != cq_index) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hdr status: %s ITMF completion on the wrong queue\n", ++ fnic_fcpio_status_to_str(hdr_status)); ++ } ++ ++ if (tag > fnic->fnic_max_tag_id) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hdr status: %s Tag out of range\n", ++ fnic_fcpio_status_to_str(hdr_status)); ++ return; ++ } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hdr status: %s Tag out of range\n", ++ fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + 
return; + } + +@@@ -1112,17 -1162,22 +1215,31 @@@ + if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { + /* Abort and terminate completion of device reset req */ + /* REVISIT : Add asserts about various flags */ +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "dev reset abts cmpl recd. id %x status %s\n", + + id, fnic_fcpio_status_to_str(hdr_status)); + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + + CMD_ABTS_STATUS(sc) = hdr_status; + + CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n", ++ hwq, mqtag, tag, ++ fnic_fcpio_status_to_str(hdr_status)); ++ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; ++ fnic_priv(sc)->abts_status = hdr_status; ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + if (io_req->abts_done) + complete(io_req->abts_done); + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + } else if (id & FNIC_TAG_ABORT) { + /* Completion of abort cmd */ ++ shost_printk(KERN_DEBUG, fnic->lport->host, ++ "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n", ++ hwq, mqtag, tag, ++ fnic_fcpio_status_to_str(hdr_status)); + switch (hdr_status) { + case FCPIO_SUCCESS: + break; +@@@ -1164,12 -1219,12 +1281,12 @@@ + + /* If the status is IO not found consider it as success */ + if (hdr_status == FCPIO_IO_NOT_FOUND) + - fnic_priv(sc)->abts_status = FCPIO_SUCCESS; + + CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; + + - if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); + +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "abts cmpl recd. 
id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); +@@@ -1181,87 -1236,89 +1298,139 @@@ + */ + if (io_req->abts_done) { + complete(io_req->abts_done); +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); + + } else { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "abts cmpl, completing IO\n"); + + CMD_SP(sc) = NULL; +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ shost_printk(KERN_INFO, fnic->lport->host, ++ "hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n", ++ hwq, mqtag, tag); ++ } else { ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); ++ fnic_priv(sc)->io_req = NULL; +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + sc->result = (DID_ERROR << 16); + - fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + - FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + - sc->device->host->host_no, id, + - sc, + - jiffies_to_msecs(jiffies - start_time), + - desc, + - (((u64)hdr_status << 40) | + - (u64)sc->cmnd[0] << 32 | + - (u64)sc->cmnd[2] << 24 | + - (u64)sc->cmnd[3] << 16 | + - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + - scsi_done(sc); + - atomic64_dec(&fnic_stats->io_stats.active_ios); + - if (atomic64_read(&fnic->io_cmpl_skip)) + - atomic64_dec(&fnic->io_cmpl_skip); + - else + - atomic64_inc(&fnic_stats->io_stats.io_completions); + + if (sc->scsi_done) { + + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + + sc->device->host->host_no, id, + + sc, + + jiffies_to_msecs(jiffies - start_time), + + desc, + + (((u64)hdr_status << 40) | + + (u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | + + (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | + + CMD_STATE(sc))); + + sc->scsi_done(sc); + + atomic64_dec(&fnic_stats->io_stats.active_ios); + + if (atomic64_read(&fnic->io_cmpl_skip)) + + atomic64_dec(&fnic->io_cmpl_skip); + + else + + atomic64_inc(&fnic_stats->io_stats.io_completions); + + } + } + + + } else if (id & FNIC_TAG_DEV_RST) { + /* Completion of device reset */ +++<<<<<<< HEAD + + CMD_LR_STATUS(sc) = hdr_status; + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING; + + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + + sc->device->host->host_no, id, sc, + + jiffies_to_msecs(jiffies - start_time), + + desc, 0, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Terminate pending " + + "dev reset cmpl recd. 
id %d status %s\n", + + (int)(id & FNIC_TAG_MASK), + + fnic_fcpio_status_to_str(hdr_status)); +++======= ++ shost_printk(KERN_INFO, fnic->lport->host, ++ "hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); ++ fnic_priv(sc)->lr_status = hdr_status; ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; ++ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, ++ sc->device->host->host_no, id, sc, ++ jiffies_to_msecs(jiffies - start_time), ++ desc, 0, fnic_flags_and_state(sc)); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + return; + } + - if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { + + if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) { + /* Need to wait for terminate completion */ + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), +++<<<<<<< HEAD + + desc, 0, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ desc, 0, fnic_flags_and_state(sc)); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + "dev reset cmpl recd after time out. " + "id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + return; + } +++<<<<<<< HEAD + + CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; + + CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "dev reset cmpl recd. 
id %d status %s\n", + + (int)(id & FNIC_TAG_MASK), + + fnic_fcpio_status_to_str(hdr_status)); +++======= ++ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + if (io_req->dr_done) + complete(io_req->dr_done); + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + } else { + shost_printk(KERN_ERR, fnic->lport->host, +++<<<<<<< HEAD + + "Unexpected itmf io state %s tag %x\n", + + fnic_ioreq_state_to_str(CMD_STATE(sc)), id); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n", ++ __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + } + + } +@@@ -1355,16 -1414,31 +1524,34 @@@ static bool fnic_cleanup_io_iter(struc + struct fnic *fnic = data; + struct fnic_io_req *io_req; + unsigned long flags = 0; + + spinlock_t *io_lock; + unsigned long start_time = 0; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + - uint16_t hwq = 0; + - int tag; + - int mqtag; + + - mqtag = blk_mq_unique_tag(rq); + - hwq = blk_mq_unique_tag_to_hwq(mqtag); + - tag = blk_mq_unique_tag_to_tag(mqtag); + + io_lock = fnic_io_lock_tag(fnic, tag); + + spin_lock_irqsave(io_lock, flags); + +++<<<<<<< HEAD + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ ++ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; ++ ++ io_req = fnic_priv(sc)->io_req; ++ if (!io_req) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n", ++ hwq, mqtag, tag, fnic_priv(sc)->flags); ++ return true; ++ } ++ ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + /* + * We will be here only when FW completes reset + * without sending completions for outstanding ios. 
+@@@ -1397,11 -1467,10 +1584,11 @@@ + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + + +cleanup_scsi_cmd: + sc->result = DID_TRANSPORT_DISRUPTED << 16; +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +- "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", +- tag, sc, jiffies - start_time); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", ++ mqtag, tag, sc, (jiffies - start_time)); + + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); +@@@ -1480,20 -1544,18 +1667,20 @@@ void fnic_wq_copy_cleanup_handler(struc + + wq_copy_cleanup_scsi_cmd: + sc->result = DID_NO_CONNECT << 16; +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "wq_copy_cleanup_handler:" + " DID_NO_CONNECT\n"); + + - FNIC_TRACE(fnic_wq_copy_cleanup_handler, + - sc->device->host->host_no, id, sc, + - jiffies_to_msecs(jiffies - start_time), + - 0, ((u64)sc->cmnd[0] << 32 | + - (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + if (sc->scsi_done) { + + FNIC_TRACE(fnic_wq_copy_cleanup_handler, + + sc->device->host->host_no, id, sc, + + jiffies_to_msecs(jiffies - start_time), + + 0, ((u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + - scsi_done(sc); + + sc->scsi_done(sc); + + } + } + + static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, +@@@ -1512,17 -1574,17 +1699,17 @@@ + return 1; + } else + atomic_inc(&fnic->in_flight); + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, flags); + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + atomic_dec(&fnic->in_flight); +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "fnic_queue_abort_io_req: failure: no descriptors\n"); + atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); + return 1; +@@@ -1572,12 -1637,12 +1759,21 @@@ static bool fnic_rport_abort_io_iter(st + return true; + } + +++<<<<<<< HEAD + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", + + sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n", ++ hwq, abt_tag, fnic_priv(sc)->flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + return true; + } + +@@@ -1614,10 -1679,10 +1810,10 @@@ + "fnic_rport_exch_reset dev rst sc 0x%p\n", 
sc); + BUG_ON(io_req->abts_done); + +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "fnic_rport_reset_exch: Issuing abts\n"); + + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); +@@@ -1631,17 -1696,20 +1827,27 @@@ + * aborted later by scsi_eh, or cleaned up during + * lun reset + */ +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + + CMD_STATE(sc) = old_ioreq_state; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n", ++ hwq, abt_tag, fnic_priv(sc)->flags); ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) ++ fnic_priv(sc)->state = old_ioreq_state; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + } else { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + - fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + else + - fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&term_stats->terminates); + iter_data->term_cnt++; + } +@@@ -1888,10 -1958,10 +2094,17 @@@ int fnic_abort_cmd(struct scsi_cmnd *sc + + /* IO out of order */ + +++<<<<<<< HEAD + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Issuing Host reset due to out of order IO\n"); +++======= ++ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Issuing host reset due to out of order IO\n"); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + + ret = FAILED; + goto fnic_abort_cmd_end; +@@@ -1935,9 -2006,9 +2148,9 @@@ fnic_abort_cmd_end + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "Returning from abort cmd type %x %s\n", task_req, + (ret == SUCCESS) ? 
+ "SUCCESS" : "FAILED"); +@@@ -1962,15 -2038,15 +2175,15 @@@ static inline int fnic_queue_dr_io_req( + return FAILED; + } else + atomic_inc(&fnic->in_flight); + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, intr_flags); + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "queue_dr_io_req failure - no descriptors\n"); + atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); + ret = -EAGAIN; +@@@ -2033,20 -2113,19 +2246,28 @@@ static bool fnic_pending_aborts_iter(st + * Found IO that is still pending with firmware and + * belongs to the LUN that we are resetting + */ +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "Found IO in %s on lun\n", + - fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + + fnic_ioreq_state_to_str(CMD_STATE(sc))); + + - if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); + return true; + } +++<<<<<<< HEAD + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "%s dev rst not pending sc 0x%p\n", __func__, + + sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "dev rst not pending sc 0x%p\n", sc); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + return true; + } + +@@@ -2066,38 -2145,40 +2287,47 @@@ + + BUG_ON(io_req->abts_done); + +++<<<<<<< HEAD + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + + abt_tag |= FNIC_TAG_DEV_RST; + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "%s: dev rst sc 0x%p\n", __func__, sc); +++======= ++ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "dev rst sc 0x%p\n", sc); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + } + + - fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + io_req->abts_done = &tm_done; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, abt_tag, + FCPIO_ITMF_ABT_TASK_TERM, + - fc_lun.scsi_lun, io_req, hwq)) { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - io_req = fnic_priv(sc)->io_req; + + fc_lun.scsi_lun, io_req)) { + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (io_req) + io_req->abts_done = NULL; + - if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + - fnic_priv(sc)->state = old_ioreq_state; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + if 
(CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + + CMD_STATE(sc) = old_ioreq_state; + + spin_unlock_irqrestore(io_lock, flags); + iter_data->ret = FAILED; ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d abt_tag: 0x%lx Abort could not be queued\n", ++ hwq, abt_tag); + return false; + } else { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + - fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); + } + - fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; + + wait_for_completion_timeout(&tm_done, msecs_to_jiffies + (fnic->config.ed_tov)); +@@@ -2265,14 -2349,23 +2496,18 @@@ int fnic_device_reset(struct scsi_cmnd + } + memset(io_req, 0, sizeof(*io_req)); + io_req->port_id = rport->port_id; + - io_req->tag = mqtag; + - fnic_priv(sc)->io_req = io_req; + - io_req->sc = sc; + - + - if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) + - WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n", + - fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag)); + - + - fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = + - io_req; + + CMD_SP(sc) = (char *)io_req; + } + io_req->dr_done = &tm_done; + - fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + - fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; + + spin_unlock_irqrestore(io_lock, flags); + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + + /* + * issue the device reset, if enqueue failed, clean up the ioreq +@@@ -2296,12 -2389,12 +2531,18 @@@ + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - io_req = fnic_priv(sc)->io_req; + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "io_req is null tag 0x%x sc 0x%p\n", tag, sc); +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + goto fnic_device_reset_end; + } + io_req->dr_done = NULL; +@@@ -2314,10 -2407,10 +2555,10 @@@ + */ + if (status == FCPIO_INVALID_CODE) { + atomic64_inc(&reset_stats->device_reset_timeouts); +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "Device reset timed out\n"); + - fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; + + spin_unlock_irqrestore(io_lock, flags); + int_to_scsilun(sc->device->lun, &fc_lun); + /* + * Issue abort and terminate on device reset request. 
+@@@ -2337,14 -2430,14 +2578,21 @@@ + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); + } else { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + - fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + + spin_lock_irqsave(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + io_req->abts_done = &tm_done; +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Abort and terminate issued on Device reset " + + "tag 0x%x sc 0x%p\n", tag, sc); +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n", ++ mqtag, sc); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + break; + } + } +@@@ -2367,11 -2460,11 +2615,11 @@@ + + /* Completed, but not successful, clean up the io_req, return fail */ + if (status != FCPIO_SUCCESS) { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, +- fnic->lport->host, ++ fnic->lport->host, fnic->fnic_num, + "Device reset completed - failed\n"); + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + goto fnic_device_reset_clean; + } + +@@@ -2383,9 -2476,9 +2631,15 @@@ + * succeeds + */ + if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + "Device reset failed" + " since could not abort all IOs\n"); + goto fnic_device_reset_clean; +@@@ -2416,13 -2512,14 +2670,13 @@@ fnic_device_reset_end + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + - if (new_sc) { + - fnic->sgreset_sc = NULL; + - mutex_unlock(&fnic->sgreset_mutex); + - } + + /* free tag if it is allocated */ + + if (unlikely(tag_gen_flag)) + + fnic_scsi_host_end_tag(fnic, sc); + +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + "Returning from device reset %s\n", + (ret == SUCCESS) ? 
+ "SUCCESS" : "FAILED"); +@@@ -2670,11 -2770,12 +2923,20 @@@ static bool fnic_abts_pending_iter(stru + * Found IO that is still pending with firmware and + * belongs to the LUN that we are resetting + */ +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "Found IO in %s on lun\n", + + fnic_ioreq_state_to_str(CMD_STATE(sc))); + + cmd_state = CMD_STATE(sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d tag: 0x%x Found IO in state: %s on lun\n", ++ hwq, tag, ++ fnic_ioreq_state_to_str(fnic_priv(sc)->state)); ++ cmd_state = fnic_priv(sc)->state; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 55cf715244a7 (scsi: fnic: Improve logs and add support for multiqueue (MQ)) + if (cmd_state == FNIC_IOREQ_ABTS_PENDING) + iter_data->ret = 1; + +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index 9ebca3720f82..6abc780ff810 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -151,9 +151,11 @@ do { \ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ + shost_printk(kern_level, host, fmt, ##args);) + +-#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \ ++#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ +- shost_printk(kern_level, host, fmt, ##args);) ++ shost_printk(kern_level, host, \ ++ "fnic<%d>: %s: %d: " fmt, fnic_num,\ ++ __func__, __LINE__, ##args);) + + #define FNIC_ISR_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/5b6179d4.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/5b6179d4.failed new file mode 100644 index 0000000000000..6e20f13d8fed0 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/5b6179d4.failed @@ -0,0 +1,28 @@ +scsi: fnic: Remove unnecessary else and unnecessary break in FDLS + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 5b6179d4b661e3c22ffa5f3fe2523bad4cd01983 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/5b6179d4.failed + +Incorporate review comments from Martin: + Remove unnecessary else and unnecessary break to fix warnings + in the FDLS code. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250106224451.3597-1-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 5b6179d4b661e3c22ffa5f3fe2523bad4cd01983) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6335be1c.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6335be1c.failed new file mode 100644 index 0000000000000..97cb216b21706 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6335be1c.failed @@ -0,0 +1,1867 @@ +scsi: fnic: Modify IO path to use FDLS + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 6335be1c5009f888367db095a0442cdb256980f8 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6335be1c.failed + +Modify IO path to use FDLS. + +Add helper functions to process IOs. + +Remove unused template functions. + +Clean up obsolete code. + +Refactor old function definitions. + + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202410210147.fQp7tYeb-lkp@intel.com/ + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-11-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 6335be1c5009f888367db095a0442cdb256980f8) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_main.c +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,1cfd9dcb5444..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -348,11 -479,14 +348,10 @@@ int fnic_set_intr_mode_msix(struct fni + void fnic_free_intr(struct fnic *fnic); + int fnic_request_intr(struct fnic *fnic); + +- int fnic_send(struct fc_lport *, struct fc_frame *); + void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); + void fnic_handle_frame(struct work_struct *work); + -void fnic_tport_event_handler(struct work_struct *work); + void fnic_handle_link(struct work_struct *work); + void fnic_handle_event(struct work_struct *work); + -void fdls_reclaim_oxid_handler(struct work_struct *work); + -void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid); + -void fdls_schedule_oxid_free_retry_work(struct work_struct *work); + int fnic_rq_cmpl_handler(struct fnic *fnic, int); + int fnic_alloc_rq_frame(struct vnic_rq *rq); + void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); +@@@ -365,12 -496,11 +364,20 @@@ void fnic_update_mac_locked(struct fni + int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); + int fnic_abort_cmd(struct scsi_cmnd *); + int fnic_device_reset(struct scsi_cmnd *); +++<<<<<<< HEAD + +int fnic_host_reset(struct scsi_cmnd *); + +int fnic_reset(struct Scsi_Host *); + +void fnic_scsi_cleanup(struct fc_lport *); + +void fnic_scsi_abort_io(struct fc_lport *); + +void fnic_empty_scsi_cleanup(struct fc_lport *); + +void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); +++======= ++ int fnic_eh_host_reset_handler(struct scsi_cmnd *sc); ++ int fnic_host_reset(struct Scsi_Host *shost); ++ void fnic_reset(struct Scsi_Host *shost); ++ int fnic_issue_fc_host_lip(struct Scsi_Host *shost); ++ void fnic_scsi_fcpio_reset(struct fnic *fnic); 
+++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index); + int fnic_wq_cmpl_handler(struct fnic *fnic, int); + int fnic_flogi_reg_handler(struct fnic *fnic, u32); +@@@ -379,10 -509,11 +386,11 @@@ void fnic_wq_copy_cleanup_handler(struc + int fnic_fw_reset_handler(struct fnic *fnic); + void fnic_terminate_rport_io(struct fc_rport *); + const char *fnic_state_to_str(unsigned int state); + -void fnic_mq_map_queues_cpus(struct Scsi_Host *host); + + + void fnic_log_q_error(struct fnic *fnic); + void fnic_handle_link_event(struct fnic *fnic); +- ++ void fnic_stats_debugfs_init(struct fnic *fnic); ++ void fnic_stats_debugfs_remove(struct fnic *fnic); + int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); + + void fnic_handle_fip_frame(struct work_struct *work); +@@@ -398,4 -528,86 +406,89 @@@ fnic_chk_state_flags_locked(struct fni + } + void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); + void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +++<<<<<<< HEAD +++======= ++ void fnic_free_txq(struct list_head *head); ++ int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, ++ char **subsys_desc); ++ void fnic_fdls_link_status_change(struct fnic *fnic, int linkup); ++ void fnic_delete_fcp_tports(struct fnic *fnic); ++ void fnic_flush_tport_event_list(struct fnic *fnic); ++ int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid); ++ unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid); ++ unsigned int fnic_count_all_ioreqs(struct fnic *fnic); ++ unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq, ++ struct scsi_device *device); ++ unsigned int fnic_count_lun_ioreqs(struct fnic *fnic, ++ struct scsi_device *device); ++ ++ struct fnic_scsi_iter_data { ++ struct fnic *fnic; ++ void *data1; ++ void *data2; ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2); ++ }; ++ ++ static inline bool ++ fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) ++ { ++ struct fnic_scsi_iter_data *iter = iter_data; ++ ++ return iter->fn(iter->fnic, sc, iter->data1, iter->data2); ++ } ++ ++ static inline void ++ fnic_scsi_io_iter(struct fnic *fnic, ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2), ++ void *data1, void *data2) ++ { ++ struct fnic_scsi_iter_data iter_data = { ++ .fn = fn, ++ .fnic = fnic, ++ .data1 = data1, ++ .data2 = data2, ++ }; ++ scsi_host_busy_iter(fnic->lport->host, fnic_io_iter_handler, &iter_data); ++ } ++ ++ #ifdef FNIC_DEBUG ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) ++ { ++ int i; ++ ++ for (i = 0; i < len; i = i+8) { ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, ++ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], ++ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); ++ } ++ } ++ ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ int len, char *pfx) ++ { ++ uint32_t s_id, d_id; ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = ntoh24(fchdr->fh_d_id); ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", ++ pfx, s_id, d_id, fchdr->fh_type, ++ FNIC_STD_GET_OX_ID(fchdr), len); ++ ++ fnic_debug_dump(fnic, (uint8_t *)fchdr, len); ++ ++ } ++ #else /* FNIC_DEBUG */ ++ static 
inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {} ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ uint32_t len, char *pfx) {} ++ #endif /* FNIC_DEBUG */ +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + #endif /* _FNIC_H_ */ +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,a6c2cb49465b..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -89,13 -85,7 +89,17 @@@ static unsigned int fnic_max_qdepth = F + module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); + +++<<<<<<< HEAD + +static struct libfc_function_template fnic_transport_template = { + + .frame_send = fnic_send, + + .lport_set_port_id = fnic_set_port_id, + + .fcp_abort_io = fnic_empty_scsi_cleanup, + + .fcp_cleanup = fnic_empty_scsi_cleanup, + + .exch_mgr_reset = fnic_exch_mgr_reset + +}; +++======= ++ struct workqueue_struct *fnic_fip_queue; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + static int fnic_slave_alloc(struct scsi_device *sdev) + { +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 321954ca143f,a38672ac224e..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -102,22 -105,6 +117,25 @@@ static const char *fnic_fcpio_status_to + return fcpio_status_str[status]; + } + +++<<<<<<< HEAD + +static void fnic_cleanup_io(struct fnic *fnic); + + + +static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, + + struct scsi_cmnd *sc) + +{ + + u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1); + + + + return &fnic->io_req_lock[hash]; + +} + + + +static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, + + int tag) + +{ + + return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; + +} + + +++======= +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + /* + * Unmap the data buffer and sense buffer for an io_req, + * also unmap and free the device-private scatter/gather list. 
+@@@ -140,11 -127,70 +158,70 @@@ static void fnic_release_ioreq_buf(stru + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + } + ++ static bool ++ fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2) ++ { ++ u32 *portid = data1; ++ unsigned int *count = data2; ++ struct fnic_io_req *io_req = fnic_priv(sc)->io_req; ++ ++ if (!io_req || (*portid && (io_req->port_id != *portid))) ++ return true; ++ ++ *count += 1; ++ return true; ++ } ++ ++ unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid) ++ { ++ unsigned int count = 0; ++ ++ fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter, ++ &portid, &count); ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "portid = 0x%x count = %u\n", portid, count); ++ return count; ++ } ++ ++ unsigned int fnic_count_all_ioreqs(struct fnic *fnic) ++ { ++ return fnic_count_ioreqs(fnic, 0); ++ } ++ ++ static bool ++ fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2) ++ { ++ struct scsi_device *scsi_device = data1; ++ unsigned int *count = data2; ++ ++ if (sc->device != scsi_device || !fnic_priv(sc)->io_req) ++ return true; ++ ++ *count += 1; ++ return true; ++ } ++ ++ unsigned int ++ fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device) ++ { ++ unsigned int count = 0; ++ ++ fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter, ++ scsi_device, &count); ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "lun = %p count = %u\n", scsi_device, count); ++ return count; ++ } ++ + /* Free up Copy Wq descriptors. Called with copy_wq lock held */ + -static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq) + +static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) + { + /* if no Ack received from firmware, then nothing to clean */ + - if (!fnic->fw_ack_recd[hwq]) + + if (!fnic->fw_ack_recd[0]) + return 1; + + /* +@@@ -211,9 -255,7 +289,13 @@@ int fnic_fw_reset_handler(struct fnic * + + /* indicate fwreset to io path */ + fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); +++<<<<<<< HEAD + + + + skb_queue_purge(&fnic->frame_queue); + + skb_queue_purge(&fnic->tx_queue); +++======= ++ ioreq_count = fnic_count_all_ioreqs(fnic); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + /* wait for io cmpl */ + while (atomic_read(&fnic->in_flight)) +@@@ -275,28 -319,23 +359,37 @@@ int fnic_flogi_reg_handler(struct fnic + goto flogi_reg_ioreq_end; + } + +- if (fnic->ctlr.map_dest) { +- eth_broadcast_addr(gw_mac); +- format = FCPIO_FLOGI_REG_DEF_DEST; +- } else { +- memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); +- format = FCPIO_FLOGI_REG_GW_DEST; +- } ++ memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN); ++ format = FCPIO_FLOGI_REG_GW_DEST; + +- if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { ++ if (fnic->config.flags & VFCF_FIP_CAPABLE) { + fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, + fc_id, gw_mac, +++<<<<<<< HEAD + + fnic->data_src_addr, + + lp->r_a_tov, lp->e_d_tov); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", + + fc_id, fnic->data_src_addr, gw_mac); + + } else { + + fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, + + format, fc_id, gw_mac); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "FLOGI reg issued fcid %x map %d dest %pM\n", + + fc_id, fnic->ctlr.map_dest, gw_mac); +++======= ++ fnic->iport.fpma, ++ iport->r_a_tov, iport->e_d_tov); ++ FNIC_SCSI_DBG(KERN_INFO, 
fnic->lport->host, fnic->fnic_num, ++ "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n", ++ fc_id, fnic->iport.fpma, gw_mac); ++ } else { ++ fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, ++ format, fc_id, gw_mac); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI reg issued fcid 0x%x dest %p\n", ++ fc_id, gw_mac); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + } + + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); +@@@ -387,10 -428,10 +484,10 @@@ static inline int fnic_queue_wq_copy_de + + exch_flags = 0; + if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && +- (rp->flags & FC_RP_FLAGS_RETRY)) ++ (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY)) + exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; + + - fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag, + + fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag, + 0, exch_flags, io_req->sgl_cnt, + SCSI_SENSE_BUFFERSIZE, + io_req->sgl_list_pa, +@@@ -415,38 -455,31 +512,56 @@@ + return 0; + } + + -int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) + +/* + + * fnic_queuecommand + + * Routine to send a scsi cdb + + * Called with host_lock held and interrupts disabled. + + */ + +static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) + { +++<<<<<<< HEAD + + const int tag = scsi_cmd_to_rq(sc)->tag; + + struct fc_lport *lp = shost_priv(sc->device->host); +++======= ++ struct request *const rq = scsi_cmd_to_rq(sc); ++ uint32_t mqtag = 0; ++ void (*done)(struct scsi_cmnd *) = scsi_done; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + struct fc_rport *rport; + struct fnic_io_req *io_req = NULL; +- struct fnic *fnic = lport_priv(lp); ++ struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host)); ++ struct fnic_iport_s *iport = NULL; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct vnic_wq_copy *wq; + - int ret = 1; + + int ret; + u64 cmd_trace; + int sg_count = 0; + unsigned long flags = 0; + unsigned long ptr; + + spinlock_t *io_lock = NULL; + int io_lock_acquired = 0; +++<<<<<<< HEAD + + struct fc_rport_libfc_priv *rp; + + + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) + + return SCSI_MLQUEUE_HOST_BUSY; + + + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + + return SCSI_MLQUEUE_HOST_BUSY; + + + + rport = starget_to_rport(scsi_target(sc->device)); + + if (!rport) { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ uint16_t hwq = 0; ++ struct fnic_tport_s *tport = NULL; ++ struct rport_dd_data_s *rdd_data; ++ uint16_t lun0_delay = 0; ++ ++ rport = starget_to_rport(scsi_target(sc->device)); ++ if (!rport) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + "returning DID_NO_CONNECT for IO as rport is NULL\n"); + sc->result = DID_NO_CONNECT << 16; + done(sc); +@@@ -455,49 -488,97 +570,138 @@@ + + ret = fc_remote_port_chkready(rport); + if (ret) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + "rport is not ready\n"); +- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = ret; + done(sc); + return 0; + } + +++<<<<<<< HEAD + + rp = rport->dd_data; + + if (!rp || rp->rp_state == RPORT_ST_DELETE) { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "rport 0x%x removed, returning DID_NO_CONNECT\n", + + 
rport->port_id); + + + + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + + sc->result = DID_NO_CONNECT<<16; +++======= ++ mqtag = blk_mq_unique_tag(rq); ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ iport = &fnic->iport; ++ ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "returning DID_NO_CONNECT for IO as iport state: %d\n", ++ iport->state); ++ sc->result = DID_NO_CONNECT << 16; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + done(sc); + return 0; + } + +++<<<<<<< HEAD + + if (rp->rp_state != RPORT_ST_READY) { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", + + rport->port_id, rp->rp_state); + + + + sc->result = DID_IMM_RETRY << 16; + + done(sc); + + return 0; + + } + + + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + + return SCSI_MLQUEUE_HOST_BUSY; +++======= ++ /* fc_remote_port_add() may have added the tport to ++ * fc_transport but dd_data not yet set ++ */ ++ rdd_data = rport->dd_data; ++ tport = rdd_data->tport; ++ if (!tport || (rdd_data->iport != iport)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "dd_data not yet set in SCSI for rport portid: 0x%x\n", ++ rport->port_id); ++ tport = fnic_find_tport_by_fcid(iport, rport->port_id); ++ if (!tport) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n", ++ rport->port_id); ++ sc->result = DID_BUS_BUSY << 16; ++ done(sc); ++ return 0; ++ } ++ ++ /* Re-assign same params as in fnic_fdls_add_tport */ ++ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; ++ rport->supported_classes = ++ FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; ++ /* the dd_data is allocated by fctransport of size dd_fcrport_size */ ++ rdd_data = rport->dd_data; ++ rdd_data->tport = tport; ++ rdd_data->iport = iport; ++ tport->rport = rport; ++ tport->flags |= FNIC_FDLS_SCSI_REGISTERED; ++ } ++ ++ if ((tport->state != FDLS_TGT_STATE_READY) ++ && (tport->state != FDLS_TGT_STATE_ADISC)) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "returning DID_NO_CONNECT for IO as tport state: %d\n", ++ tport->state); ++ sc->result = DID_NO_CONNECT << 16; ++ done(sc); ++ return 0; ++ } +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + atomic_inc(&fnic->in_flight); ++ atomic_inc(&tport->in_flight); + ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { ++ atomic_dec(&fnic->in_flight); ++ atomic_dec(&tport->in_flight); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } ++ ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", ++ fnic->state_flags); ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } ++ ++ if (!tport->lun0_delay) { ++ lun0_delay = 1; ++ tport->lun0_delay++; ++ } ++ +++<<<<<<< HEAD + + /* + + * Release host lock, use driver resource specific locks from here. + + * Don't re-enable interrupts in case they were disabled prior to the + + * caller disabling them. 
+ + */ + + spin_unlock(lp->host->host_lock); + + CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED; + + CMD_FLAGS(sc) = FNIC_NO_FLAGS; +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; ++ fnic_priv(sc)->flags = FNIC_NO_FLAGS; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + /* Get a new io_req for this SCSI IO */ + io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); +@@@ -582,8 -675,7 +787,12 @@@ + mempool_free(io_req, fnic->io_req_pool); + } + atomic_dec(&fnic->in_flight); +++<<<<<<< HEAD + + /* acquire host lock before returning to SCSI */ + + spin_lock(lp->host->host_lock); +++======= ++ atomic_dec(&tport->in_flight); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + return ret; + } else { + atomic64_inc(&fnic_stats->io_stats.active_ios); +@@@ -608,11 -700,17 +817,22 @@@ out + + /* if only we issued IO, will we have the io lock */ + if (io_lock_acquired) + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + atomic_dec(&fnic->in_flight); +++<<<<<<< HEAD + + /* acquire host lock before returning to SCSI */ + + spin_lock(lp->host->host_lock); +++======= ++ atomic_dec(&tport->in_flight); ++ ++ if (lun0_delay) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "LUN0 delay\n"); ++ mdelay(LUN0_DELAY_TIME); ++ } ++ +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + return ret; + } + +@@@ -654,17 -751,10 +874,11 @@@ static int fnic_fcpio_fw_reset_cmpl_han + /* Ready to send flogi out */ + fnic->state = FNIC_IN_ETH_MODE; + } else { + - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + - "reset failed with header status: %s\n", + - fnic_fcpio_status_to_str(hdr_status)); + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "fnic fw_reset : failed %s\n", + + fnic_fcpio_status_to_str(hdr_status)); + +- /* +- * Unable to change to eth mode, cannot send out flogi +- * Change state to fc mode, so that subsequent Flogi +- * requests from libFC will cause more attempts to +- * reset the firmware. Free the cached flogi +- */ + fnic->state = FNIC_IN_FC_MODE; + atomic64_inc(&reset_stats->fw_reset_failures); + ret = -1; +@@@ -686,9 -774,9 +899,9 @@@ + * If fnic is being removed, or fw reset failed + * free the flogi frame. 
Else, send it out + */ +- if (fnic->remove_wait || ret) { ++ if (ret) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + - fnic_free_txq(&fnic->tx_queue); + + skb_queue_purge(&fnic->tx_queue); + goto reset_cmpl_handler_end; + } + +@@@ -724,13 -812,13 +937,23 @@@ static int fnic_fcpio_flogi_reg_cmpl_ha + + /* Check flogi registration completion status */ + if (!hdr_status) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "flog reg succeeded\n"); + + fnic->state = FNIC_IN_FC_MODE; + + } else { + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "fnic flogi reg :failed %s\n", +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "FLOGI reg succeeded\n"); ++ fnic->state = FNIC_IN_FC_MODE; ++ } else { ++ FNIC_SCSI_DBG(KERN_DEBUG, ++ fnic->lport->host, fnic->fnic_num, ++ "fnic flogi reg failed: %s\n", +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + fnic_fcpio_status_to_str(hdr_status)); + fnic->state = FNIC_IN_ETH_MODE; + ret = -1; +@@@ -1005,22 -1126,10 +1231,13 @@@ static void fnic_fcpio_icmnd_cmpl_handl + ((u64)icmnd_cmpl->_resvd0[1] << 56 | + (u64)icmnd_cmpl->_resvd0[0] << 48 | + jiffies_to_msecs(jiffies - start_time)), + - desc, cmd_trace, fnic_flags_and_state(sc)); + + desc, cmd_trace, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + +- if (sc->sc_data_direction == DMA_FROM_DEVICE) { +- fnic->lport->host_stats.fcp_input_requests++; +- fnic->fcp_input_bytes += xfer_len; +- } else if (sc->sc_data_direction == DMA_TO_DEVICE) { +- fnic->lport->host_stats.fcp_output_requests++; +- fnic->fcp_output_bytes += xfer_len; +- } else +- fnic->lport->host_stats.fcp_control_requests++; +- + /* Call SCSI completion function to complete the IO */ + - scsi_done(sc); + + if (sc->scsi_done) + + sc->scsi_done(sc); + + spin_unlock_irqrestore(io_lock, flags); + + mempool_free(io_req, fnic->io_req_pool); + +@@@ -1351,20 -1507,35 +1568,42 @@@ int fnic_wq_copy_cmpl_handler(struct fn + + static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) + { + - struct request *const rq = scsi_cmd_to_rq(sc); + + const int tag = scsi_cmd_to_rq(sc)->tag; + struct fnic *fnic = data; + struct fnic_io_req *io_req; +++<<<<<<< HEAD + + unsigned long flags = 0; + + spinlock_t *io_lock; +++======= +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + unsigned long start_time = 0; ++ unsigned long flags; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + - uint16_t hwq = 0; + - int tag; + - int mqtag; + + - mqtag = blk_mq_unique_tag(rq); + - hwq = blk_mq_unique_tag_to_hwq(mqtag); + - tag = blk_mq_unique_tag_to_tag(mqtag); + + io_lock = fnic_io_lock_tag(fnic, tag); + + spin_lock_irqsave(io_lock, flags); + +++<<<<<<< HEAD + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ ++ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; ++ ++ io_req = fnic_priv(sc)->io_req; ++ if (!io_req) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. 
Returning\n", ++ hwq, mqtag, tag, fnic_priv(sc)->flags); ++ return true; ++ } ++ ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + /* + * We will be here only when FW completes reset + * without sending completions for outstanding ios. +@@@ -1374,20 -1545,18 +1613,32 @@@ + complete(io_req->dr_done); + else if (io_req && io_req->abts_done) + complete(io_req->abts_done); +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + return true; + - } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + + spin_unlock_irqrestore(io_lock, flags); + return true; + } + + if (!io_req) { + + spin_unlock_irqrestore(io_lock, flags); + + goto cleanup_scsi_cmd; + + } + + +++<<<<<<< HEAD + + CMD_SP(sc) = NULL; + + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ fnic_priv(sc)->io_req = NULL; ++ io_req->sc = NULL; ++ start_time = io_req->start_time; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + /* + * If there is a scsi_cmnd associated with this io_req, then +@@@ -1396,36 -1565,28 +1647,56 @@@ + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + + +cleanup_scsi_cmd: + sc->result = DID_TRANSPORT_DISRUPTED << 16; +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", + + tag, sc, jiffies - start_time); +++======= ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", ++ mqtag, tag, sc, (jiffies - start_time)); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + +++<<<<<<< HEAD + + /* Complete the command to SCSI */ + + if (sc->scsi_done) { + + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) + + shost_printk(KERN_ERR, fnic->lport->host, + + "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", + + tag, sc); + + + + FNIC_TRACE(fnic_cleanup_io, + + sc->device->host->host_no, tag, sc, + + jiffies_to_msecs(jiffies - start_time), + + 0, ((u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | + + (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + + + sc->scsi_done(sc); + + } +++======= ++ FNIC_TRACE(fnic_cleanup_io, ++ sc->device->host->host_no, tag, sc, ++ jiffies_to_msecs(jiffies - start_time), ++ 0, ((u64) sc->cmnd[0] << 32 | ++ (u64) sc->cmnd[2] << 24 | ++ (u64) sc->cmnd[3] << 16 | ++ (u64) sc->cmnd[4] << 8 | sc->cmnd[5]), ++ (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)-> ++ state)); ++ ++ /* Complete the command to SCSI */ ++ scsi_done(sc); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + return true; + } + +@@@ -1498,31 -1695,35 +1805,43 @@@ wq_copy_cleanup_scsi_cmd + + static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, + u32 task_req, u8 *fc_lun, + - struct fnic_io_req *io_req, + - unsigned int hwq) + + struct fnic_io_req *io_req) + { + - struct vnic_wq_copy *wq = 
&fnic->hw_copy_wq[hwq]; + + struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; + + struct Scsi_Host *host = fnic->lport->host; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + unsigned long flags; ++ struct fnic_tport_s *tport = io_req->tport; + + - spin_lock_irqsave(&fnic->fnic_lock, flags); + + spin_lock_irqsave(host->host_lock, flags); + if (unlikely(fnic_chk_state_flags_locked(fnic, + FNIC_FLAGS_IO_BLOCKED))) { +++<<<<<<< HEAD + + spin_unlock_irqrestore(host->host_lock, flags); +++======= ++ atomic_dec(&fnic->in_flight); ++ atomic_dec(&tport->in_flight); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + return 1; + } else + atomic_inc(&fnic->in_flight); + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, flags); + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + atomic_dec(&fnic->in_flight); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ atomic_dec(&tport->in_flight); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + "fnic_queue_abort_io_req: failure: no descriptors\n"); + atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); + return 1; +@@@ -1551,24 -1752,31 +1870,43 @@@ struct fnic_rport_abort_io_iter_data + + static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) + { + - struct request *const rq = scsi_cmd_to_rq(sc); + struct fnic_rport_abort_io_iter_data *iter_data = data; + struct fnic *fnic = iter_data->fnic; + - int abt_tag = 0; + + int abt_tag = scsi_cmd_to_rq(sc)->tag; + struct fnic_io_req *io_req; +++<<<<<<< HEAD + + spinlock_t *io_lock; + + unsigned long flags; +++======= +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct scsi_lun fc_lun; + enum fnic_ioreq_state old_ioreq_state; +++<<<<<<< HEAD +++======= ++ uint16_t hwq = 0; ++ unsigned long flags; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + - abt_tag = blk_mq_unique_tag(rq); + - hwq = blk_mq_unique_tag_to_hwq(abt_tag); + + io_lock = fnic_io_lock_tag(fnic, abt_tag); + + spin_lock_irqsave(io_lock, flags); + +++<<<<<<< HEAD + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + +++======= ++ if (!sc) { ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq); ++ return true; ++ } ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + if (!io_req || io_req->port_id != iter_data->port_id) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + return true; + } + +@@@ -1585,41 -1793,42 +1923,68 @@@ + * Found IO that is still pending with firmware and + * belongs to rport that went away + */ + - if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + - 
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); + return true; + } ++ + if (io_req->abts_done) { + shost_printk(KERN_ERR, fnic->lport->host, +++<<<<<<< HEAD + + "fnic_rport_exch_reset: io_req->abts_done is set " + + "state is %s\n", + + fnic_ioreq_state_to_str(CMD_STATE(sc))); +++======= ++ "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n", ++ fnic_ioreq_state_to_str(fnic_priv(sc)->state)); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + } + + - if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { + + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { + shost_printk(KERN_ERR, fnic->lport->host, +++<<<<<<< HEAD + + "rport_exch_reset " + + "IO not yet issued %p tag 0x%x flags " + + "%x state %d\n", + + sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc)); + + } + + old_ioreq_state = CMD_STATE(sc); + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + + atomic64_inc(&reset_stats->device_reset_terminates); + + abt_tag |= FNIC_TAG_DEV_RST; + + } + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); + + BUG_ON(io_req->abts_done); + + + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ "rport_exch_reset IO not yet issued %p abt_tag 0x%x", ++ sc, abt_tag); ++ shost_printk(KERN_ERR, fnic->lport->host, ++ "flags %x state %d\n", fnic_priv(sc)->flags, ++ fnic_priv(sc)->state); ++ } ++ old_ioreq_state = fnic_priv(sc)->state; ++ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; ++ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; ++ ++ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { ++ atomic64_inc(&reset_stats->device_reset_terminates); ++ abt_tag |= FNIC_TAG_DEV_RST; ++ } ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc); ++ WARN_ON_ONCE(io_req->abts_done); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + "fnic_rport_reset_exch: Issuing abts\n"); + + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + +- /* Now queue the abort command to firmware */ ++ /* Queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, abt_tag, +@@@ -1648,8 -1861,10 +2014,10 @@@ + return true; + } + + -void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) + +static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) + { ++ unsigned int io_count = 0; ++ unsigned long flags; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct fnic_rport_abort_io_iter_data iter_data = { + .fnic = fnic, +@@@ -1657,10 -1872,9 +2025,16 @@@ + .term_cnt = 0, + }; + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "fnic_rport_exch_reset called portid 0x%06x\n", + + port_id); +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "fnic rport exchange reset for tport: 0x%06x\n", ++ port_id); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + if (fnic->in_remove) + return; +@@@ -1674,37 -1913,35 +2073,55 @@@ + + void fnic_terminate_rport_io(struct fc_rport *rport) + { +- struct fc_rport_libfc_priv *rdata; +- struct fc_lport *lport; +- struct fnic *fnic; ++ struct fnic_tport_s *tport; ++ struct rport_dd_data_s *rdd_data; ++ struct fnic_iport_s 
*iport = NULL; ++ struct fnic *fnic = NULL; + + if (!rport) { +- printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); ++ pr_err("rport is NULL\n"); + return; + } +- rdata = rport->dd_data; + +- if (!rdata) { +- printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); +- return; ++ rdd_data = rport->dd_data; ++ if (rdd_data) { ++ tport = rdd_data->tport; ++ if (!tport) { ++ pr_err( ++ "term rport io called after tport is deleted. Returning 0x%8x\n", ++ rport->port_id); ++ } else { ++ pr_err( ++ "term rport io called after tport is set 0x%8x\n", ++ rport->port_id); ++ pr_err( ++ "tport maybe rediscovered\n"); ++ ++ iport = (struct fnic_iport_s *) tport->iport; ++ fnic = iport->fnic; ++ fnic_rport_exch_reset(fnic, rport->port_id); ++ } + } +++<<<<<<< HEAD + + lport = rdata->local_port; + + + + if (!lport) { + + printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); + + return; + + } + + fnic = lport_priv(lport); + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, "fnic_terminate_rport_io called" + + " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", + + rport->port_name, rport->node_name, rport, + + rport->port_id); + + + + if (fnic->in_remove) + + return; + + + + fnic_rport_exch_reset(fnic, rport->port_id); +++======= +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + } + + /* +@@@ -1719,7 -1957,7 +2137,11 @@@ int fnic_abort_cmd(struct scsi_cmnd *sc + struct fnic *fnic; + struct fnic_io_req *io_req = NULL; + struct fc_rport *rport; +++<<<<<<< HEAD + + spinlock_t *io_lock; +++======= ++ struct rport_dd_data_s *rdd_data; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + unsigned long flags; + unsigned long start_time = 0; + int ret = SUCCESS; +@@@ -1737,23 -1977,60 +2159,67 @@@ + fc_block_scsi_eh(sc); + + /* Get local-port, check ready and link up */ +++<<<<<<< HEAD + + lp = shost_priv(sc->device->host); + + + + fnic = lport_priv(lp); +++======= ++ fnic = *((struct fnic **) shost_priv(sc->device->host)); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ iport = &fnic->iport; ++ +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + fnic_stats = &fnic->fnic_stats; + abts_stats = &fnic->fnic_stats.abts_stats; + term_stats = &fnic->fnic_stats.term_stats; + + rport = starget_to_rport(scsi_target(sc->device)); + - mqtag = blk_mq_unique_tag(rq); + - hwq = blk_mq_unique_tag_to_hwq(mqtag); + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", + + rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); + + - fnic_priv(sc)->flags = FNIC_NO_FLAGS; + + CMD_FLAGS(sc) = FNIC_NO_FLAGS; + +- if (lp->state != LPORT_ST_READY || !(lp->link_up)) { ++ rdd_data = rport->dd_data; ++ tport = rdd_data->tport; ++ ++ if (!tport) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Abort cmd called after tport delete! 
rport fcid: 0x%x", ++ rport->port_id); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n", ++ sc->device->lun, hwq, mqtag, ++ sc->cmnd[0], fnic_priv(sc)->flags); ++ ret = FAILED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_abort_cmd_end; ++ } ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x", ++ rport->port_id, sc->device->lun, hwq, mqtag); ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Op: 0x%x flags: 0x%x\n", ++ sc->cmnd[0], ++ fnic_priv(sc)->flags); ++ ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport NOT in READY state"); ++ ret = FAILED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_abort_cmd_end; ++ } ++ ++ if ((tport->state != FDLS_TGT_STATE_READY) && ++ (tport->state != FDLS_TGT_STATE_ADISC)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "tport state: %d\n", tport->state); + ret = FAILED; + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } + +@@@ -1767,13 -2045,13 +2233,18 @@@ + * happened, the completion wont actually complete the command + * and it will be considered as an aborted command + * + - * .io_req will not be cleared except while holding io_req_lock. + + * The CMD_SP will not be cleared except while holding io_req_lock. + */ + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - io_req = fnic_priv(sc)->io_req; + + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ ret = FAILED; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + goto fnic_abort_cmd_end; + } + +@@@ -1953,24 -2231,33 +2423,37 @@@ static inline int fnic_queue_dr_io_req( + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + struct scsi_lun fc_lun; + int ret = 0; +++<<<<<<< HEAD + + unsigned long intr_flags; +++======= ++ unsigned long flags; ++ uint16_t hwq = 0; ++ uint32_t tag = 0; ++ struct fnic_tport_s *tport = io_req->tport; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + - tag = io_req->tag; + - hwq = blk_mq_unique_tag_to_hwq(tag); + - wq = &fnic->hw_copy_wq[hwq]; + - + - spin_lock_irqsave(&fnic->fnic_lock, flags); + + spin_lock_irqsave(host->host_lock, intr_flags); + if (unlikely(fnic_chk_state_flags_locked(fnic, + FNIC_FLAGS_IO_BLOCKED))) { + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, intr_flags); + return FAILED; +- } else ++ } else { + atomic_inc(&fnic->in_flight); +++<<<<<<< HEAD + + spin_unlock_irqrestore(host->host_lock, intr_flags); +++======= ++ atomic_inc(&tport->in_flight); ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + + 
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "queue_dr_io_req failure - no descriptors\n"); + atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); + ret = -EAGAIN; +@@@ -1992,8 -2280,9 +2475,9 @@@ + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + + lr_io_req_end: + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + atomic_dec(&fnic->in_flight); ++ atomic_dec(&tport->in_flight); + + return ret; + } +@@@ -2197,17 -2489,23 +2680,28 @@@ int fnic_device_reset(struct scsi_cmnd + struct fnic_io_req *io_req = NULL; + struct fc_rport *rport; + int status; ++ int count = 0; + int ret = FAILED; + + spinlock_t *io_lock; + unsigned long flags; + unsigned long start_time = 0; + struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct reset_stats *reset_stats; + - int mqtag = rq->tag; + + int tag = rq->tag; + DECLARE_COMPLETION_ONSTACK(tm_done); + + int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ + bool new_sc = 0; +++<<<<<<< HEAD +++======= ++ uint16_t hwq = 0; ++ struct fnic_iport_s *iport = NULL; ++ struct rport_dd_data_s *rdd_data; ++ struct fnic_tport_s *tport; ++ u32 old_soft_reset_count; ++ u32 old_link_down_cnt; ++ int exit_dr = 0; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + /* Wait for rport to unblock */ + fc_block_scsi_eh(sc); +@@@ -2222,12 -2520,38 +2716,44 @@@ + atomic64_inc(&reset_stats->device_resets); + + rport = starget_to_rport(scsi_target(sc->device)); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n", + + rport->port_id, sc->device->lun, sc); +++======= + +- if (lp->state != LPORT_ST_READY || !(lp->link_up)) ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n", ++ rport->port_id, sc->device->lun, hwq, mqtag, ++ fnic_priv(sc)->flags); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) ++ ++ rdd_data = rport->dd_data; ++ tport = rdd_data->tport; ++ if (!tport) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Dev rst called after tport delete! 
rport fcid: 0x%x lun: %llu\n", ++ rport->port_id, sc->device->lun); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_device_reset_end; ++ } ++ ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport NOT in READY state"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_device_reset_end; ++ } ++ ++ if ((tport->state != FDLS_TGT_STATE_READY) && ++ (tport->state != FDLS_TGT_STATE_ADISC)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "tport state: %d\n", tport->state); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_device_reset_end; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + /* Check if remote port up */ + if (fc_remote_port_chkready(rport)) { +@@@ -2265,14 -2591,24 +2790,28 @@@ + } + memset(io_req, 0, sizeof(*io_req)); + io_req->port_id = rport->port_id; +++<<<<<<< HEAD + + CMD_SP(sc) = (char *)io_req; +++======= ++ io_req->tag = mqtag; ++ fnic_priv(sc)->io_req = io_req; ++ io_req->tport = tport; ++ io_req->sc = sc; ++ ++ if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) ++ WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n", ++ fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag)); ++ ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = ++ io_req; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + } + io_req->dr_done = &tm_done; + - fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + - fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; + + spin_unlock_irqrestore(io_lock, flags); + + - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); + + /* + * issue the device reset, if enqueue failed, clean up the ioreq +@@@ -2285,10 -2621,15 +2824,15 @@@ + io_req->dr_done = NULL; + goto fnic_device_reset_clean; + } + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); + ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ old_link_down_cnt = iport->fnic->link_down_cnt; ++ old_soft_reset_count = fnic->soft_reset_count; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ + /* + * Wait on the local completion for LUN reset. The io_req may be + * freed while we wait since we hold no lock. +@@@ -2296,17 -2637,42 +2840,47 @@@ + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ /* ++ * Wake up can be due to the following reasons: ++ * 1) The device reset completed from target. ++ * 2) Device reset timed out. ++ * 3) A link-down/host_reset may have happened in between. ++ * 4) The device reset was aborted and io_req->dr_done was called. 
++ */ ++ ++ exit_dr = 0; ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if ((old_link_down_cnt != fnic->link_down_cnt) || ++ (fnic->reset_in_progress) || ++ (fnic->soft_reset_count != old_soft_reset_count) || ++ (iport->state != FNIC_IPORT_STATE_READY)) ++ exit_dr = 1; ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + if (!io_req) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + - "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc); + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "io_req is null tag 0x%x sc 0x%p\n", tag, sc); + goto fnic_device_reset_end; + } ++ ++ if (exit_dr) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Host reset called for fnic. Exit device reset\n"); ++ io_req->dr_done = NULL; ++ goto fnic_device_reset_clean; ++ } + io_req->dr_done = NULL; + + - status = fnic_priv(sc)->lr_status; + + status = CMD_LR_STATUS(sc); + + /* + * If lun reset not completed, bail out with failed. io_req +@@@ -2314,55 -2680,13 +2888,61 @@@ + */ + if (status == FCPIO_INVALID_CODE) { + atomic64_inc(&reset_stats->device_reset_timeouts); + - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset timed out\n"); +++<<<<<<< HEAD + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; + + spin_unlock_irqrestore(io_lock, flags); + + int_to_scsilun(sc->device->lun, &fc_lun); + + /* + + * Issue abort and terminate on device reset request. + + * If q'ing of terminate fails, retry it after a delay. 
+ + */ + + while (1) { + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) { + + spin_unlock_irqrestore(io_lock, flags); + + break; + + } + + spin_unlock_irqrestore(io_lock, flags); + + if (fnic_queue_abort_io_req(fnic, + + tag | FNIC_TAG_DEV_RST, + + FCPIO_ITMF_ABT_TASK_TERM, + + fc_lun.scsi_lun, io_req)) { + + wait_for_completion_timeout(&tm_done, + + msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); + + } else { + + spin_lock_irqsave(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + + io_req->abts_done = &tm_done; + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Abort and terminate issued on Device reset " + + "tag 0x%x sc 0x%p\n", tag, sc); + + break; + + } + + } + + while (1) { + + spin_lock_irqsave(io_lock, flags); + + if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { + + spin_unlock_irqrestore(io_lock, flags); + + wait_for_completion_timeout(&tm_done, + + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + + break; + + } else { + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + io_req->abts_done = NULL; + + goto fnic_device_reset_clean; + + } + + } +++======= ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; ++ int_to_scsilun(sc->device->lun, &fc_lun); ++ goto fnic_device_reset_clean; +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + } else { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + } + + /* Completed, but not successful, clean up the io_req, return fail */ +@@@ -2383,11 -2707,10 +2963,18 @@@ + * succeeds + */ + if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Device reset failed" + + " since could not abort all IOs\n"); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "Device reset failed: Cannot abort all IOs\n"); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + goto fnic_device_reset_clean; + } + +@@@ -2416,13 -2751,25 +3012,28 @@@ fnic_device_reset_end + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + - if (new_sc) { + - fnic->sgreset_sc = NULL; + - mutex_unlock(&fnic->sgreset_mutex); + - } + + /* free tag if it is allocated */ + + if (unlikely(tag_gen_flag)) + + fnic_scsi_host_end_tag(fnic, sc); + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) { ++ if (count >= 2) { ++ ret = FAILED; ++ break; ++ } ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Cannot clean up all IOs for the LUN\n"); ++ schedule_timeout(msecs_to_jiffies(1000)); ++ count++; ++ } ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + "Returning from device reset %s\n", + (ret == SUCCESS) ? 
+ "SUCCESS" : "FAILED"); +@@@ -2433,68 -2780,78 +3044,96 @@@ + return ret; + } + +- /* Clean up all IOs, clean up libFC local port */ +- int fnic_reset(struct Scsi_Host *shost) ++ static void fnic_post_flogo_linkflap(struct fnic *fnic) ++ { ++ unsigned long flags; ++ ++ fnic_fdls_link_status_change(fnic, 0); ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ ++ if (fnic->link_status) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ fnic_fdls_link_status_change(fnic, 1); ++ return; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } ++ ++ /* Logout from all the targets and simulate link flap */ ++ void fnic_reset(struct Scsi_Host *shost) + { +- struct fc_lport *lp; + struct fnic *fnic; +- int ret = 0; + struct reset_stats *reset_stats; + +- lp = shost_priv(shost); +- fnic = lport_priv(lp); ++ fnic = *((struct fnic **) shost_priv(shost)); + reset_stats = &fnic->fnic_stats.reset_stats; + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_reset called\n"); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Issuing fnic reset\n"); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + + atomic64_inc(&reset_stats->fnic_resets); ++ fnic_post_flogo_linkflap(fnic); + +- /* +- * Reset local port, this will clean up libFC exchanges, +- * reset remote port sessions, and if link is up, begin flogi +- */ +- ret = fc_lport_reset(lp); +- +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Returning from fnic reset %s\n", + + (ret == 0) ? + + "SUCCESS" : "FAILED"); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Returning from fnic reset"); +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + +- if (ret == 0) +- atomic64_inc(&reset_stats->fnic_reset_completions); +- else +- atomic64_inc(&reset_stats->fnic_reset_failures); ++ atomic64_inc(&reset_stats->fnic_reset_completions); ++ } ++ ++ int fnic_issue_fc_host_lip(struct Scsi_Host *shost) ++ { ++ int ret = 0; ++ struct fnic *fnic = *((struct fnic **) shost_priv(shost)); ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FC host lip issued"); + ++ ret = fnic_host_reset(shost); + return ret; + } + +++<<<<<<< HEAD + +/* + + * SCSI Error handling calls driver's eh_host_reset if all prior + + * error handling levels return FAILED. If host reset completes + + * successfully, and if link is up, then Fabric login begins. + + * + + * Host Reset is the highest level of error recovery. If this fails, then + + * host is offlined by SCSI. 
+ + * + + */ + +int fnic_host_reset(struct scsi_cmnd *sc) +++======= ++ int fnic_host_reset(struct Scsi_Host *shost) +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + { + - int ret = SUCCESS; + + int ret; + unsigned long wait_host_tmo; + - struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + struct Scsi_Host *shost = sc->device->host; + + struct fc_lport *lp = shost_priv(shost); + + struct fnic *fnic = lport_priv(lp); + unsigned long flags; + - struct fnic_iport_s *iport = &fnic->iport; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + - if (fnic->reset_in_progress == NOT_IN_PROGRESS) { + - fnic->reset_in_progress = IN_PROGRESS; + + if (!fnic->internal_reset_inprogress) { + + fnic->internal_reset_inprogress = true; + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + - wait_for_completion_timeout(&fnic->reset_completion_wait, + - msecs_to_jiffies(10000)); + - + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - if (fnic->reset_in_progress == IN_PROGRESS) { + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + - FNIC_SCSI_DBG(KERN_WARNING, fnic->lport->host, fnic->fnic_num, + - "Firmware reset in progress. Skipping another host reset\n"); + - return SUCCESS; + - } + - fnic->reset_in_progress = IN_PROGRESS; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "host reset in progress skipping another host reset\n"); + + return SUCCESS; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +@@@ -2523,124 -2890,9 +3162,127 @@@ + return ret; + } + +++<<<<<<< HEAD + +/* + + * This fxn is called from libFC when host is removed + + */ + +void fnic_scsi_abort_io(struct fc_lport *lp) + +{ + + int err = 0; + + unsigned long flags; + + enum fnic_state old_state; + + struct fnic *fnic = lport_priv(lp); + + DECLARE_COMPLETION_ONSTACK(remove_wait); + + + + /* Issue firmware reset for fnic, wait for reset to complete */ + +retry_fw_reset: + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) && + + fnic->link_events) { + + /* fw reset is in progress, poll for its completion */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_timeout(msecs_to_jiffies(100)); + + goto retry_fw_reset; + + } + + + + fnic->remove_wait = &remove_wait; + + old_state = fnic->state; + + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + err = fnic_fw_reset_handler(fnic); + + if (err) { + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + + fnic->state = old_state; + + fnic->remove_wait = NULL; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + /* Wait for firmware reset to complete */ + + wait_for_completion_timeout(&remove_wait, + + msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + fnic->remove_wait = NULL; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_scsi_abort_io %s\n", + + (fnic->state == FNIC_IN_ETH_MODE) ? 
+ + "SUCCESS" : "FAILED"); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + +} + + + +/* + + * This fxn called from libFC to clean up driver IO state on link down + + */ + +void fnic_scsi_cleanup(struct fc_lport *lp) + +{ + + unsigned long flags; + + enum fnic_state old_state; + + struct fnic *fnic = lport_priv(lp); + + + + /* issue fw reset */ + +retry_fw_reset: + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { + + /* fw reset is in progress, poll for its completion */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_timeout(msecs_to_jiffies(100)); + + goto retry_fw_reset; + + } + + old_state = fnic->state; + + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + if (fnic_fw_reset_handler(fnic)) { + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + + fnic->state = old_state; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + } + + + +} + + + +void fnic_empty_scsi_cleanup(struct fc_lport *lp) + +{ + +} + + + +void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) + +{ + + struct fnic *fnic = lport_priv(lp); + + + + /* Non-zero sid, nothing to do */ + + if (sid) + + goto call_fc_exch_mgr_reset; + + + + if (did) { + + fnic_rport_exch_reset(fnic, did); + + goto call_fc_exch_mgr_reset; + + } + + + + /* + + * sid = 0, did = 0 + + * link down or device being removed + + */ + + if (!fnic->in_remove) + + fnic_scsi_cleanup(lp); + + else + + fnic_scsi_abort_io(lp); + + + + /* call libFC exch mgr reset to reset its exchanges */ + +call_fc_exch_mgr_reset: + + fc_exch_mgr_reset(lp, sid, did); + + + +} + + +++======= +++>>>>>>> 6335be1c5009 (scsi: fnic: Modify IO path to use FDLS) + static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) + { + - struct request *const rq = scsi_cmd_to_rq(sc); + struct fnic_pending_aborts_iter_data *iter_data = data; + struct fnic *fnic = iter_data->fnic; + int cmd_state; +* Unmerged path drivers/scsi/fnic/fnic.h +diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h +index 1cb6a68c8e4e..05d30fa6a71e 100644 +--- a/drivers/scsi/fnic/fnic_io.h ++++ b/drivers/scsi/fnic/fnic_io.h +@@ -19,6 +19,7 @@ + #define _FNIC_IO_H_ + + #include ++#include "fnic_fdls.h" + + #define FNIC_DFLT_SG_DESC_CNT 32 + #define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */ +@@ -53,6 +54,8 @@ enum fnic_ioreq_state { + }; + + struct fnic_io_req { ++ struct fnic_iport_s *iport; ++ struct fnic_tport_s *tport; + struct host_sg_desc *sgl_list; /* sgl list */ + void *sgl_list_alloc; /* sgl list address used for free */ + dma_addr_t sense_buf_pa; /* dma address for sense buffer*/ +* Unmerged path drivers/scsi/fnic/fnic_main.c +* Unmerged path drivers/scsi/fnic/fnic_scsi.c +diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h +index ca7ab8afa60a..d582ca7ecdea 100644 +--- a/drivers/scsi/fnic/fnic_stats.h ++++ b/drivers/scsi/fnic/fnic_stats.h +@@ -141,6 +141,4 @@ struct stats_debug_info { + }; + + int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); +-void fnic_stats_debugfs_init(struct fnic *); +-void fnic_stats_debugfs_remove(struct fnic *); + #endif /* _FNIC_STATS_H_ */ diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6cfba115.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6cfba115.failed new file mode 100644 index 0000000000000..2c7fce6c36eeb --- 
/dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6cfba115.failed @@ -0,0 +1,226 @@ +scsi: fnic: Remove extern definition from .c files + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 6cfba11510d6f4d0e863fc0fa939c7a983cf13bd +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/6cfba115.failed + +Implement review comments from Martin: + Remove extern definition of fnic_fip_queue from .c files + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250106224451.3597-2-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 6cfba11510d6f4d0e863fc0fa939c7a983cf13bd) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fip.c +# drivers/scsi/fnic/fip.h +# drivers/scsi/fnic/fnic_fcs.c +diff --cc drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3,1e8cd64f9a5c..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -26,166 -14,37 +26,171 @@@ + #include + #include + #include + +#include + #include + -#include + -#include + +#include + #include "fnic_io.h" + #include "fnic.h" + -#include "fnic_fdls.h" + -#include "fdls_fc.h" + +#include "fnic_fip.h" + #include "cq_enet_desc.h" + #include "cq_exch_desc.h" + -#include "fip.h" + +++<<<<<<< HEAD + +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; + +struct workqueue_struct *fnic_fip_queue; +++======= ++ #define MAX_RESET_WAIT_COUNT 64 ++ +++>>>>>>> 6cfba11510d6 (scsi: fnic: Remove extern definition from .c files) + struct workqueue_struct *fnic_event_queue; + + -static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; + +static void fnic_set_eth_mode(struct fnic *); + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); + + -/* + - * Internal Functions + - * This function will initialize the src_mac address to be + - * used in outgoing frames + - */ + -static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, + - uint8_t *src_mac) + +void fnic_handle_link(struct work_struct *work) + { + - FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + - "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x", + - src_mac[0], src_mac[1], src_mac[2], src_mac[3], + - src_mac[4], src_mac[5]); + + struct fnic *fnic = container_of(work, struct fnic, link_work); + + unsigned long flags; + + int old_link_status; + + u32 old_link_down_cnt; + + u64 old_port_speed, new_port_speed; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + + + fnic->link_events = 1; /* less work to just set everytime*/ + + + + if (fnic->stop_rx_link_events) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + old_link_down_cnt = fnic->link_down_cnt; + + old_link_status = fnic->link_status; + + old_port_speed = atomic64_read( + + &fnic->fnic_stats.misc_stats.current_port_speed); + + + + fnic->link_status = vnic_dev_link_status(fnic->vdev); + + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + + + + new_port_speed = vnic_dev_port_speed(fnic->vdev); + 
+ atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, + + new_port_speed); + + if (old_port_speed != new_port_speed) + + FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, + + "Current vnic speed set to : %llu\n", + + new_port_speed); + + + + switch (vnic_dev_port_speed(fnic->vdev)) { + + case DCEM_PORTSPEED_10G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; + + break; + + case DCEM_PORTSPEED_20G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; + + break; + + case DCEM_PORTSPEED_25G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; + + break; + + case DCEM_PORTSPEED_40G: + + case DCEM_PORTSPEED_4x10G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; + + break; + + case DCEM_PORTSPEED_100G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; + + break; + + default: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; + + break; + + } + + + + if (old_link_status == fnic->link_status) { + + if (!fnic->link_status) { + + /* DOWN -> DOWN */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data(fnic->lport->host->host_no, + + FNIC_FC_LE, "Link Status: DOWN->DOWN", + + strlen("Link Status: DOWN->DOWN")); + + } else { + + if (old_link_down_cnt != fnic->link_down_cnt) { + + /* UP -> DOWN -> UP */ + + fnic->lport->host_stats.link_failure_count++; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, + + FNIC_FC_LE, + + "Link Status:UP_DOWN_UP", + + strlen("Link_Status:UP_DOWN_UP") + + ); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "link down\n"); + + fcoe_ctlr_link_down(&fnic->ctlr); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + /* start FCoE VLAN discovery */ + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, + + FNIC_FC_LE, + + "Link Status: UP_DOWN_UP_VLAN", + + strlen( + + "Link Status: UP_DOWN_UP_VLAN") + + ); + + fnic_fcoe_send_vlan_req(fnic); + + return; + + } + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "link up\n"); + + fcoe_ctlr_link_up(&fnic->ctlr); + + } else { + + /* UP -> UP */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: UP_UP", + + strlen("Link Status: UP_UP")); + + } + + } + + } else if (fnic->link_status) { + + /* DOWN -> UP */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + /* start FCoE VLAN discovery */ + + fnic_fc_trace_set_data(fnic->lport->host->host_no, + + FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + + strlen("Link Status: DOWN_UP_VLAN")); + + fnic_fcoe_send_vlan_req(fnic); + + + + return; + + } + + + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); + + fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); + + fcoe_ctlr_link_up(&fnic->ctlr); + + } else { + + /* UP -> DOWN */ + + fnic->lport->host_stats.link_failure_count++; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); + + fnic_fc_trace_set_data( + + 
fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: UP_DOWN", + + strlen("Link Status: UP_DOWN")); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "deleting fip-timer during link-down\n"); + + del_timer_sync(&fnic->fip_timer); + + } + + fcoe_ctlr_link_down(&fnic->ctlr); + + } + + - memcpy(fnic->iport.fpma, src_mac, 6); + } + + /* +* Unmerged path drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fip.h +* Unmerged path drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fip.h +* Unmerged path drivers/scsi/fnic/fnic_fcs.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/74f46a05.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/74f46a05.failed new file mode 100644 index 0000000000000..a4f5632e8c28a --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/74f46a05.failed @@ -0,0 +1,51 @@ +scsi: fnic: Turn off FDMI ACTIVE flags on link down + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 74f46a0524f8d2f01dc7ca95bb5fc463a8603e72 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/74f46a05.failed + +When the link goes down and comes up, FDMI requests are not sent out +anymore. + +Fix bug by turning off FNIC_FDMI_ACTIVE when the link goes down. + +Fixes: 09c1e6ab4ab2 ("scsi: fnic: Add and integrate support for FDMI") + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Tested-by: Karan Tilak Kumar + Cc: stable@vger.kernel.org + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250618003431.6314-2-kartilak@cisco.com + Reviewed-by: John Meneghini + Signed-off-by: Martin K. Petersen +(cherry picked from commit 74f46a0524f8d2f01dc7ca95bb5fc463a8603e72) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fnic.h +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,c2fdc6553e62..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -39,7 -29,8 +39,11 @@@ + + #define DRV_NAME "fnic" + #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +++<<<<<<< HEAD + +#define DRV_VERSION "1.6.0.55" +++======= ++ #define DRV_VERSION "1.8.0.2" +++>>>>>>> 74f46a0524f8 (scsi: fnic: Turn off FDMI ACTIVE flags on link down) + #define PFX DRV_NAME ": " + #define DFX DRV_NAME "%d: " + +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fnic.h diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7dbe3aa2.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7dbe3aa2.failed new file mode 100644 index 0000000000000..e42018e9aed06 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7dbe3aa2.failed @@ -0,0 +1,84 @@ +scsi: fnic: Return appropriate error code for mem alloc failure + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 7dbe3aa2f3f83949174b64860dadfaeec3454cff +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7dbe3aa2.failed + +Return appropriate error code from fnic_probe when memory create slab pool +fails. Fix bug report. 
+ + Suggested-by: Dan Carpenter + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250110091746.17671-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 7dbe3aa2f3f83949174b64860dadfaeec3454cff) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,2d51adf18501..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -774,24 -887,35 +774,51 @@@ static int fnic_probe(struct pci_dev *p + fnic->fw_ack_index[i] = -1; + } + + + for (i = 0; i < FNIC_IO_LOCKS; i++) + + spin_lock_init(&fnic->io_req_lock[i]); + + + + err = -ENOMEM; + + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + + if (!fnic->io_req_pool) + + goto err_out_free_resources; + + + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); +++<<<<<<< HEAD + + if (!pool) + + goto err_out_free_ioreq_pool; +++======= ++ if (!pool) { ++ err = -ENOMEM; ++ goto err_out_free_resources; ++ } +++>>>>>>> 7dbe3aa2f3f8 (scsi: fnic: Return appropriate error code for mem alloc failure) + fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; + + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); +- if (!pool) ++ if (!pool) { ++ err = -ENOMEM; + goto err_out_free_dflt_pool; ++ } + fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; + +++<<<<<<< HEAD +++======= ++ pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache); ++ if (!pool) { ++ err = -ENOMEM; ++ goto err_out_fdls_frame_pool; ++ } ++ fnic->frame_pool = pool; ++ ++ pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM, ++ fdls_frame_elem_cache); ++ if (!pool) { ++ err = -ENOMEM; ++ goto err_out_fdls_frame_elem_pool; ++ } ++ fnic->frame_elem_pool = pool; ++ +++>>>>>>> 7dbe3aa2f3f8 (scsi: fnic: Return appropriate error code for mem alloc failure) + /* setup vlan config, hw inserts vlan header */ + fnic->vlan_hw_insert = 1; + fnic->vlan_id = 0; +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7e6886b7.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7e6886b7.failed new file mode 100644 index 0000000000000..077fd6030a0e5 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7e6886b7.failed @@ -0,0 +1,4421 @@ +scsi: fnic: Code cleanup + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 7e6886b705fd8b338dbd4b7492bd45f0259cc55f +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7e6886b7.failed + +Replace existing host structure with fnic host. + +Add headers from scsi to support new functionality. + +Remove unused code and declarations. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-14-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 7e6886b705fd8b338dbd4b7492bd45f0259cc55f) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fip.c +# drivers/scsi/fnic/fip.h +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_fcs.c +# drivers/scsi/fnic/fnic_main.c +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,b02459b6ec2f..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -22,8 -10,10 +22,15 @@@ + #include + #include + #include +++<<<<<<< HEAD + +#include + +#include +++======= ++ #include ++ #include ++ #include ++ #include +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + #include "fnic_io.h" + #include "fnic_res.h" + #include "fnic_trace.h" +@@@ -229,8 -341,9 +236,14 @@@ struct fnic_cpy_wq + /* Per-instance private data structure */ + struct fnic { + int fnic_num; +++<<<<<<< HEAD + + struct fc_lport *lport; + + struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ +++======= ++ enum fnic_role_e role; ++ struct fnic_iport_s iport; ++ struct Scsi_Host *host; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + struct vnic_dev_bar bar0; + + struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; +@@@ -259,10 -376,9 +272,7 @@@ + u32 vlan_hw_insert:1; /* let hw insert the tag */ + u32 in_remove:1; /* fnic device in removal */ + u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ +- u32 link_events:1; /* set when we get any link event*/ +- +- struct completion *remove_wait; /* device remove thread blocks */ + + - struct completion *fw_reset_done; + - u32 reset_in_progress; + atomic_t in_flight; /* io counter */ + bool internal_reset_inprogress; + u32 _reserved; /* fill hole */ +@@@ -398,4 -519,90 +408,93 @@@ fnic_chk_state_flags_locked(struct fni + } + void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); + void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +++<<<<<<< HEAD +++======= ++ void fnic_free_txq(struct list_head *head); ++ int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, ++ char **subsys_desc); ++ void fnic_fdls_link_status_change(struct fnic *fnic, int linkup); ++ void fnic_delete_fcp_tports(struct fnic *fnic); ++ void fnic_flush_tport_event_list(struct fnic *fnic); ++ int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid); ++ unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid); ++ unsigned int fnic_count_all_ioreqs(struct fnic *fnic); ++ unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq, ++ struct scsi_device *device); ++ unsigned int fnic_count_lun_ioreqs(struct fnic *fnic, ++ struct scsi_device *device); ++ void fnic_scsi_unload(struct fnic *fnic); ++ void fnic_scsi_unload_cleanup(struct fnic *fnic); ++ int fnic_get_debug_info(struct stats_debug_info *info, ++ struct fnic *fnic); ++ ++ struct fnic_scsi_iter_data { ++ struct fnic *fnic; ++ void *data1; ++ void *data2; ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2); ++ }; ++ ++ static inline bool ++ fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) ++ { ++ struct fnic_scsi_iter_data *iter = iter_data; ++ ++ return iter->fn(iter->fnic, sc, iter->data1, iter->data2); ++ } ++ ++ static inline void ++ fnic_scsi_io_iter(struct fnic *fnic, ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2), ++ void *data1, void *data2) ++ { ++ struct fnic_scsi_iter_data iter_data = { ++ .fn = fn, ++ .fnic = fnic, ++ .data1 = data1, ++ .data2 = data2, 
++ }; ++ scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data); ++ } ++ ++ #ifdef FNIC_DEBUG ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) ++ { ++ int i; ++ ++ for (i = 0; i < len; i = i+8) { ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, ++ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], ++ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); ++ } ++ } ++ ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ int len, char *pfx) ++ { ++ uint32_t s_id, d_id; ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = ntoh24(fchdr->fh_d_id); ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", ++ pfx, s_id, d_id, fchdr->fh_type, ++ FNIC_STD_GET_OX_ID(fchdr), len); ++ ++ fnic_debug_dump(fnic, (uint8_t *)fchdr, len); ++ ++ } ++ #else /* FNIC_DEBUG */ ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {} ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ uint32_t len, char *pfx) {} ++ #endif /* FNIC_DEBUG */ +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + #endif /* _FNIC_H_ */ +diff --cc drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3,086b92578624..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -26,40 -14,183 +26,203 @@@ + #include + #include + #include + +#include + #include +++<<<<<<< HEAD + +#include +++======= ++ #include ++ #include +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + #include "fnic_io.h" + #include "fnic.h" + -#include "fnic_fdls.h" + -#include "fdls_fc.h" + +#include "fnic_fip.h" + #include "cq_enet_desc.h" + #include "cq_exch_desc.h" + -#include "fip.h" + - + -#define MAX_RESET_WAIT_COUNT 64 + + -extern struct workqueue_struct *fnic_fip_queue; + +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; + +struct workqueue_struct *fnic_fip_queue; + struct workqueue_struct *fnic_event_queue; + +++<<<<<<< HEAD + +static void fnic_set_eth_mode(struct fnic *); + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); +++======= ++ static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; ++ ++ /* ++ * Internal Functions ++ * This function will initialize the src_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, ++ uint8_t *src_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ src_mac[0], src_mac[1], src_mac[2], src_mac[3], ++ src_mac[4], src_mac[5]); ++ ++ memcpy(fnic->iport.fpma, src_mac, 6); ++ } ++ ++ /* ++ * This function will initialize the dst_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic, ++ uint8_t *dst_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], ++ dst_mac[4], dst_mac[5]); ++ ++ memcpy(fnic->iport.fcfmac, dst_mac, 6); ++ } ++ ++ void fnic_get_host_port_state(struct Scsi_Host 
*shost) ++ { ++ struct fnic *fnic = *((struct fnic **) shost_priv(shost)); ++ struct fnic_iport_s *iport = &fnic->iport; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (!fnic->link_status) ++ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; ++ else if (iport->state == FNIC_IPORT_STATE_READY) ++ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; ++ else ++ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } ++ ++ void fnic_fdls_link_status_change(struct fnic *fnic, int linkup) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "link up: %d, usefip: %d", linkup, iport->usefip); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ if (linkup) { ++ if (iport->usefip) { ++ iport->state = FNIC_IPORT_STATE_FIP; ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "link up: %d, usefip: %d", linkup, iport->usefip); ++ fnic_fcoe_send_vlan_req(fnic); ++ } else { ++ iport->state = FNIC_IPORT_STATE_FABRIC_DISC; ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "iport->state: %d", iport->state); ++ fnic_fdls_disc_start(iport); ++ } ++ } else { ++ iport->state = FNIC_IPORT_STATE_LINK_WAIT; ++ if (!is_zero_ether_addr(iport->fpma)) ++ vnic_dev_del_addr(fnic->vdev, iport->fpma); ++ fnic_common_fip_cleanup(fnic); ++ fnic_fdls_link_down(iport); ++ ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ } ++ ++ ++ /* ++ * FPMA can be either taken from ethhdr(dst_mac) or flogi resp ++ * or derive from FC_MAP and FCID combination. While it should be ++ * same, revisit this if there is any possibility of not-correct. ++ */ ++ void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, ++ uint8_t *fcid) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct ethhdr *ethhdr = (struct ethhdr *) rx_frame; ++ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 }; ++ ++ memcpy(&fcmac[3], fcid, 3); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ ethhdr->h_dest[0], ethhdr->h_dest[1], ++ ethhdr->h_dest[2], ethhdr->h_dest[3], ++ ethhdr->h_dest[4], ethhdr->h_dest[5]); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], ++ fcmac[5]); ++ ++ fnic_fdls_set_fcoe_srcmac(fnic, fcmac); ++ fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source); ++ } ++ ++ void fnic_fdls_init(struct fnic *fnic, int usefip) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ /* Initialize iPort structure */ ++ iport->state = FNIC_IPORT_STATE_INIT; ++ iport->fnic = fnic; ++ iport->usefip = usefip; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x", ++ iport->hwmac[0], iport->hwmac[1], iport->hwmac[2], ++ iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]); ++ ++ INIT_LIST_HEAD(&iport->tport_list); ++ INIT_LIST_HEAD(&iport->tport_list_pending_del); ++ ++ fnic_fdls_disc_init(iport); ++ } +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + void fnic_handle_link(struct work_struct *work) + { + struct fnic *fnic = container_of(work, struct fnic, link_work); + + unsigned long flags; + int old_link_status; + u32 old_link_down_cnt; + - int max_count = 0; + + u64 old_port_speed, new_port_speed; + +++<<<<<<< HEAD + + spin_lock_irqsave(&fnic->fnic_lock, flags); +++======= ++ if (vnic_dev_get_intr_mode(fnic->vdev) != 
VNIC_DEV_INTR_MODE_MSI) ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Interrupt mode is not MSI\n"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + - spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + fnic->link_events = 1; /* less work to just set everytime*/ + + if (fnic->stop_rx_link_events) { +++<<<<<<< HEAD + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Stop link rx events\n"); ++ return; ++ } ++ ++ /* Do not process if the fnic is already in transitional state */ ++ if ((fnic->state != FNIC_IN_ETH_MODE) ++ && (fnic->state != FNIC_IN_FC_MODE)) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fnic in transitional state: %d. link up: %d ignored", ++ fnic->state, vnic_dev_link_status(fnic->vdev)); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Current link status: %d iport state: %d\n", ++ fnic->link_status, fnic->iport.state); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return; + } + +@@@ -71,206 -199,113 +234,290 @@@ + fnic->link_status = vnic_dev_link_status(fnic->vdev); + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + +++<<<<<<< HEAD + + new_port_speed = vnic_dev_port_speed(fnic->vdev); + + atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, + + new_port_speed); + + if (old_port_speed != new_port_speed) + + FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, + + "Current vnic speed set to : %llu\n", + + new_port_speed); + + + + switch (vnic_dev_port_speed(fnic->vdev)) { + + case DCEM_PORTSPEED_10G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; + + break; + + case DCEM_PORTSPEED_20G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; + + break; + + case DCEM_PORTSPEED_25G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; + + break; + + case DCEM_PORTSPEED_40G: + + case DCEM_PORTSPEED_4x10G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; + + break; + + case DCEM_PORTSPEED_100G: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; + + break; + + default: + + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; + + fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; + + break; +++======= ++ while (fnic->reset_in_progress == IN_PROGRESS) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fnic reset in progress. Link event needs to wait\n"); ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "waiting for reset completion\n"); ++ wait_for_completion_timeout(&fnic->reset_completion_wait, ++ msecs_to_jiffies(5000)); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "woken up from reset completion wait\n"); ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ max_count++; ++ if (max_count >= MAX_RESET_WAIT_COUNT) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Rstth waited for too long. 
Skipping handle link event\n"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ return; ++ } ++ } ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Marking fnic reset in progress\n"); ++ fnic->reset_in_progress = IN_PROGRESS; ++ ++ if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) || ++ (fnic->link_status != old_link_status)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "old link status: %d link status: %d\n", ++ old_link_status, (int) fnic->link_status); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "old down count %d down count: %d\n", ++ old_link_down_cnt, (int) fnic->link_down_cnt); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + if (old_link_status == fnic->link_status) { + if (!fnic->link_status) { + /* DOWN -> DOWN */ +++<<<<<<< HEAD + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data(fnic->lport->host->host_no, + + FNIC_FC_LE, "Link Status: DOWN->DOWN", + + strlen("Link Status: DOWN->DOWN")); + + } else { + + if (old_link_down_cnt != fnic->link_down_cnt) { + + /* UP -> DOWN -> UP */ + + fnic->lport->host_stats.link_failure_count++; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, + + FNIC_FC_LE, + + "Link Status:UP_DOWN_UP", + + strlen("Link_Status:UP_DOWN_UP") + + ); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "link down\n"); + + fcoe_ctlr_link_down(&fnic->ctlr); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + /* start FCoE VLAN discovery */ + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, + + FNIC_FC_LE, + + "Link Status: UP_DOWN_UP_VLAN", + + strlen( + + "Link Status: UP_DOWN_UP_VLAN") + + ); + + fnic_fcoe_send_vlan_req(fnic); + + return; + + } + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "link up\n"); + + fcoe_ctlr_link_up(&fnic->ctlr); + + } else { + + /* UP -> UP */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: UP_UP", + + strlen("Link Status: UP_UP")); +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "down->down\n"); ++ } else { ++ if (old_link_down_cnt != fnic->link_down_cnt) { ++ /* UP -> DOWN -> UP */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "up->down. Link down\n"); ++ fnic_fdls_link_status_change(fnic, 0); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "down->up. 
Link up\n"); ++ fnic_fdls_link_status_change(fnic, 1); ++ } else { ++ /* UP -> UP */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "up->up\n"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + } + } else if (fnic->link_status) { + /* DOWN -> UP */ +++<<<<<<< HEAD + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + /* start FCoE VLAN discovery */ + + fnic_fc_trace_set_data(fnic->lport->host->host_no, + + FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + + strlen("Link Status: DOWN_UP_VLAN")); + + fnic_fcoe_send_vlan_req(fnic); + + + + return; + + } + + + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); + + fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); + + fcoe_ctlr_link_up(&fnic->ctlr); + + } else { + + /* UP -> DOWN */ + + fnic->lport->host_stats.link_failure_count++; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: UP_DOWN", + + strlen("Link Status: UP_DOWN")); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "deleting fip-timer during link-down\n"); + + del_timer_sync(&fnic->fip_timer); + + } + + fcoe_ctlr_link_down(&fnic->ctlr); + + } + + +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "down->up. Link up\n"); ++ fnic_fdls_link_status_change(fnic, 1); ++ } else { ++ /* UP -> DOWN */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "up->down. Link down\n"); ++ fnic_fdls_link_status_change(fnic, 0); ++ } ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ fnic->reset_in_progress = NOT_IN_PROGRESS; ++ complete(&fnic->reset_completion_wait); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Marking fnic reset completion\n"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + +- /* +- * This function passes incoming fabric frames to libFC +- */ + void fnic_handle_frame(struct work_struct *work) + { + struct fnic *fnic = container_of(work, struct fnic, frame_work); + - struct fnic_frame_list *cur_frame, *next; + - int fchdr_offset = 0; + + struct fc_lport *lp = fnic->lport; + + unsigned long flags; + + struct sk_buff *skb; + + struct fc_frame *fp; + + - spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + - list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) { + + while ((skb = skb_dequeue(&fnic->frame_queue))) { + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + - list_del(&cur_frame->links); + - spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + - kfree(cur_frame->fp); + - mempool_free(cur_frame, fnic->frame_elem_pool); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + dev_kfree_skb(skb); + + return; + + } + + fp = (struct fc_frame *)skb; + + + + /* + + * If we're in a transitional state, just re-queue and return. + + * The queue will be serviced when we get to a stable state. 
+ + */ + + if (fnic->state != FNIC_IN_FC_MODE && +++<<<<<<< HEAD + + fnic->state != FNIC_IN_ETH_MODE) { + + skb_queue_head(&fnic->frame_queue, skb); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + fc_exch_recv(lp, fp); + + } + +} + + + +void fnic_fcoe_evlist_free(struct fnic *fnic) + +{ + + struct fnic_event *fevt = NULL; + + struct fnic_event *next = NULL; + + unsigned long flags; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (list_empty(&fnic->evlist)) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + + list_del(&fevt->list); + + kfree(fevt); + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +} + + + +void fnic_handle_event(struct work_struct *work) + +{ + + struct fnic *fnic = container_of(work, struct fnic, event_work); + + struct fnic_event *fevt = NULL; + + struct fnic_event *next = NULL; + + unsigned long flags; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (list_empty(&fnic->evlist)) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + + if (fnic->stop_rx_link_events) { + + list_del(&fevt->list); + + kfree(fevt); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + + fnic->state != FNIC_IN_ETH_MODE) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++======= ++ fnic->state != FNIC_IN_ETH_MODE) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Cannot process frame in transitional state\n"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return; + } + +@@@ -605,19 -326,23 +852,28 @@@ drop + + void fnic_handle_fip_frame(struct work_struct *work) + { + - struct fnic_frame_list *cur_frame, *next; + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + unsigned long flags; + + struct sk_buff *skb; + + struct ethhdr *eh; + +++<<<<<<< HEAD + + while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { + + spin_lock_irqsave(&fnic->fnic_lock, flags); +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Processing FIP frame\n"); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue, ++ links) { +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + if (fnic->stop_rx_link_events) { + - list_del(&cur_frame->links); + - spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + - kfree(cur_frame->fp); + - kfree(cur_frame); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + dev_kfree_skb(skb); + return; + } + - + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. 
+@@@ -733,9 -407,13 +989,16 @@@ void fnic_update_mac_locked(struct fni + new = ctl; + if (ether_addr_equal(data, new)) + return; +++<<<<<<< HEAD + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); +++======= ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Update MAC: %u\n", *new); ++ +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) + vnic_dev_del_addr(fnic->vdev, data); + - + memcpy(data, new, ETH_ALEN); + if (!ether_addr_equal(new, ctl)) + vnic_dev_add_addr(fnic->vdev, new); +@@@ -856,75 -454,92 +1119,118 @@@ static void fnic_rq_cmpl_frame_recv(str + + cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); + if (type == CQ_DESC_TYPE_RQ_FCP) { + - cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type, + - &color, &q_number, &completed_index, &eop, &sop, + - &fcoe_fnic_crc_ok, &exchange_id, &tmpl, + - &fcp_bytes_written, &sof, &eof, &ingress_port, + - &packet_error, &fcoe_enc_error, &fcs_ok, + - &vlan_stripped, &vlan); + - ethhdr_stripped = 1; + - bytes_written = fcp_bytes_written; + - } else if (type == CQ_DESC_TYPE_RQ_ENET) { + - cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type, + - &color, &q_number, &completed_index, + - &ingress_port, &fcoe, &eop, &sop, &rss_type, + - &csum_not_calc, &rss_hash, &enet_bytes_written, + - &packet_error, &vlan_stripped, &vlan, + - &checksum, &fcoe_sof, &fcoe_fnic_crc_ok, + - &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, + - &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, + - &ipv4_fragment, &fcs_ok); + - + - ethhdr_stripped = 0; + - bytes_written = enet_bytes_written; + + cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, + + &type, &color, &q_number, &completed_index, + + &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, + + &tmpl, &fcp_bytes_written, &sof, &eof, + + &ingress_port, &packet_error, + + &fcoe_enc_error, &fcs_ok, &vlan_stripped, + + &vlan); + + skb_trim(skb, fcp_bytes_written); + + fr_sof(fp) = sof; + + fr_eof(fp) = eof; + + + } else if (type == CQ_DESC_TYPE_RQ_ENET) { + + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, + + &type, &color, &q_number, &completed_index, + + &ingress_port, &fcoe, &eop, &sop, + + &rss_type, &csum_not_calc, &rss_hash, + + &bytes_written, &packet_error, + + &vlan_stripped, &vlan, &checksum, + + &fcoe_sof, &fcoe_fc_crc_ok, + + &fcoe_enc_error, &fcoe_eof, + + &tcp_udp_csum_ok, &udp, &tcp, + + &ipv4_csum_ok, &ipv6, &ipv4, + + &ipv4_fragment, &fcs_ok); + + skb_trim(skb, bytes_written); + if (!fcs_ok) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); +++<<<<<<< HEAD + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fcs error. dropping packet.\n"); +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fnic 0x%p fcs error. 
Dropping packet.\n", fnic); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + goto drop; + } + - eh = (struct ethhdr *) fp; + - if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) { + + if (fnic_import_rq_eth_pkt(fnic, skb)) + + return; + +++<<<<<<< HEAD + + } else { + + /* wrong CQ type*/ + + shost_printk(KERN_ERR, fnic->lport->host, + + "fnic rq_cmpl wrong cq type x%x\n", type); +++======= ++ if (fnic_import_rq_eth_pkt(fnic, fp)) ++ return; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Dropping h_proto 0x%x", ++ be16_to_cpu(eh->h_proto)); ++ goto drop; ++ } ++ } else { ++ /* wrong CQ type */ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fnic rq_cmpl wrong cq type x%x\n", type); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + goto drop; + } + + - if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) { + + if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); +++<<<<<<< HEAD + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic rq_cmpl fcoe x%x fcsok x%x" + + " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" + + " x%x\n", + + fcoe, fcs_ok, packet_error, + + fcoe_fc_crc_ok, fcoe_enc_error); +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n", ++ fcoe, fcs_ok, packet_error, ++ fcoe_fnic_crc_ok, fcoe_enc_error); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + goto drop; + } + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++<<<<<<< HEAD +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fnic->stop_rx_link_events: %d\n", ++ fnic->stop_rx_link_events); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + goto drop; + } + - + + fr_dev(fp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++<<<<<<< HEAD + + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, + + (char *)skb->data, skb->len)) != 0) { + + printk(KERN_ERR "fnic ctlr frame trace error!!!"); +++======= ++ ++ frame_elem = mempool_alloc(fnic->frame_elem_pool, ++ GFP_ATOMIC | __GFP_ZERO); ++ if (!frame_elem) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Failed to allocate memory for frame elem"); ++ goto drop; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + - frame_elem->fp = fp; + - frame_elem->rx_ethhdr_stripped = ethhdr_stripped; + - frame_elem->frame_len = bytes_written; + - + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - list_add_tail(&frame_elem->links, &fnic->frame_queue); + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + skb_queue_tail(&fnic->frame_queue, skb); + queue_work(fnic_event_queue, &fnic->frame_work); + - return; + + + return; + drop: + - kfree(fp); + + dev_kfree_skb_irq(skb); + } + + static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, +@@@ -950,10 -565,10 +1256,10 @@@ int fnic_rq_cmpl_handler(struct fnic *f + cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, + fnic_rq_cmpl_handler_cont, + NULL); + - if (cur_work_done && fnic->stop_rx_link_events != 1) { + + if (cur_work_done) { + err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); + if (err) +- shost_printk(KERN_ERR, fnic->lport->host, ++ shost_printk(KERN_ERR, fnic->host, + "fnic_alloc_rq_frame can't alloc" + " frame\n"); + } +@@@ -971,35 -586,32 +1277,50 @@@ + int fnic_alloc_rq_frame(struct vnic_rq *rq) + { + struct fnic *fnic = vnic_dev_priv(rq->vdev); + - void *buf; + + struct sk_buff *skb; + 
u16 len; + dma_addr_t pa; + - int ret; + - + + int r; + + +++<<<<<<< HEAD + + len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; + + skb = dev_alloc_skb(len); + + if (!skb) { + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "Unable to allocate RQ sk_buff\n"); +++======= ++ len = FNIC_FRAME_HT_ROOM; ++ buf = kmalloc(len, GFP_ATOMIC); ++ if (!buf) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Unable to allocate RQ buffer of size: %d\n", len); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return -ENOMEM; + } + - + - pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE); + + skb_reset_mac_header(skb); + + skb_reset_transport_header(skb); + + skb_reset_network_header(skb); + + skb_put(skb, len); + + pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, pa)) { +++<<<<<<< HEAD + + r = -ENOMEM; + + printk(KERN_ERR "PCI mapping failed with error %d\n", r); + + goto free_skb; +++======= ++ ret = -ENOMEM; ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "PCI mapping failed with error %d\n", ret); ++ goto free_buf; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + - fnic_queue_rq_desc(rq, buf, pa, len); + + fnic_queue_rq_desc(rq, skb, pa, len); + return 0; + -free_buf: + - kfree(buf); + - return ret; + + + +free_skb: + + kfree_skb(skb); + + return r; + } + + void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) +@@@ -1086,103 -636,129 +1407,147 @@@ static int fnic_send_frame(struct fnic + int ret = 0; + unsigned long flags; + + - pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE); + + fh = fc_frame_header_get(fp); + + skb = fp_skb(fp); + + +++<<<<<<< HEAD + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && + + fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) + + return 0; + + + + if (!fnic->vlan_hw_insert) { + + eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); + + vlan_hdr = skb_push(skb, eth_hdr_len); + + eth_hdr = (struct ethhdr *)vlan_hdr; + + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + + vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); + + vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + + fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); + + } else { + + eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); + + eth_hdr = skb_push(skb, eth_hdr_len); + + eth_hdr->h_proto = htons(ETH_P_FCOE); + + fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); + + } + + + + if (fnic->ctlr.map_dest) + + fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); + + else + + memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); + + memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); + + + + tot_len = skb->len; + + BUG_ON(tot_len % 4); + + + + memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); + + fcoe_hdr->fcoe_sof = fr_sof(fp); + + if (FC_FCOE_VER) + + FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); + + + + pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); + + if (dma_mapping_error(&fnic->pdev->dev, pa)) { + + ret = -ENOMEM; + + printk(KERN_ERR "DMA map failed with error %d\n", ret); + + goto free_skb_on_err; + + } + + + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, + + (char *)eth_hdr, tot_len)) != 0) { + + printk(KERN_ERR "fnic ctlr frame trace error!!!"); +++======= ++ if ((fnic_fc_trace_set_data(fnic->fnic_num, ++ FNIC_FC_SEND | 0x80, (char *) frame, ++ frame_len)) != 0) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fnic ctlr frame trace error"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + 
spin_lock_irqsave(&fnic->wq_lock[0], flags); + + if (!vnic_wq_desc_avail(wq)) { +++<<<<<<< HEAD + + dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE); +++======= ++ dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "vnic work queue descriptor is not available"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + ret = -1; + - goto fnic_send_frame_end; + + goto irq_restore; + } + + - /* hw inserts cos value */ + - fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T, + - 0, fnic->vlan_id, 1, 1, 1); + + fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), + + 0 /* hw inserts cos value */, + + fnic->vlan_id, 1, 1, 1); + + -fnic_send_frame_end: + +irq_restore: + spin_unlock_irqrestore(&fnic->wq_lock[0], flags); + + + +free_skb_on_err: + + if (ret) + + dev_kfree_skb_any(fp_skb(fp)); + + + return ret; + } + + -/** + - * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and FCoE + - * info. This interface is used only in the non fast path. (login, fabric + - * registrations etc.) + - * + - * @fnic: fnic instance + - * @frame: frame structure with FC payload filled in + - * @frame_size: length of the frame to be sent + - * @srcmac: source mac address + - * @dstmac: destination mac address + - * + - * Called with the fnic lock held. + +/* + + * fnic_send + + * Routine to send a raw frame + */ + -static int + -fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size, + - uint8_t *srcmac, uint8_t *dstmac) + +int fnic_send(struct fc_lport *lp, struct fc_frame *fp) + { + - struct ethhdr *pethhdr; + - struct fcoe_hdr *pfcoe_hdr; + - struct fnic_frame_list *frame_elem; + - int len = frame_size; + - int ret; + - struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame + + - FNIC_ETH_FCOE_HDRS_OFFSET); + + struct fnic *fnic = lport_priv(lp); + + unsigned long flags; + +++<<<<<<< HEAD + + if (fnic->in_remove) { + + dev_kfree_skb(fp_skb(fp)); + + return -1; + + } +++======= ++ pethhdr = (struct ethhdr *) frame; ++ pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE); ++ memcpy(pethhdr->h_source, srcmac, ETH_ALEN); ++ memcpy(pethhdr->h_dest, dstmac, ETH_ALEN); ++ ++ pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr)); ++ pfcoe_hdr->fcoe_sof = FC_SOF_I3; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + /* + * Queue frame if in a transitional state. + * This occurs while registering the Port_ID / MAC address after FLOGI. 
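 + * fnic_flush_tx() drains this queue via fnic_send_frame() once the state settles.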
+ */ +++<<<<<<< HEAD + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { + + skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++======= ++ if ((fnic->state != FNIC_IN_FC_MODE) ++ && (fnic->state != FNIC_IN_ETH_MODE)) { ++ frame_elem = mempool_alloc(fnic->frame_elem_pool, ++ GFP_ATOMIC | __GFP_ZERO); ++ if (!frame_elem) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Failed to allocate memory for frame elem"); ++ return -ENOMEM; ++ } ++ ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n", ++ ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id), ++ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); ++ frame_elem->fp = frame; ++ frame_elem->frame_len = len; ++ list_add_tail(&frame_elem->links, &fnic->tx_queue); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return 0; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + - fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing"); + - + - ret = fnic_send_frame(fnic, frame, len); + - return ret; + -} + - + -void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame, + - int frame_size) + -{ + - struct fnic *fnic = iport->fnic; + - uint8_t *dstmac, *srcmac; + - + - /* If module unload is in-progress, don't send */ + - if (fnic->in_remove) + - return; + - + - if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) { + - srcmac = iport->fpma; + - dstmac = iport->fcfmac; + - } else { + - srcmac = iport->hwmac; + - dstmac = FCOE_ALL_FCF_MAC; + - } + - + - fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac); + -} + - + -int + -fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame, + - int frame_size) + -{ + - struct fnic *fnic = iport->fnic; + - + - if (fnic->in_remove) + - return -1; + - + - fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing"); + - return fnic_send_frame(fnic, frame, frame_size); + + return fnic_send_frame(fnic, fp); + } + + /** +@@@ -1198,51 -774,76 +1563,112 @@@ + void fnic_flush_tx(struct work_struct *work) + { + struct fnic *fnic = container_of(work, struct fnic, flush_work); + + struct sk_buff *skb; + struct fc_frame *fp; + - struct fnic_frame_list *cur_frame, *next; + +++<<<<<<< HEAD + + while ((skb = skb_dequeue(&fnic->tx_queue))) { + + fp = (struct fc_frame *)skb; + + fnic_send_frame(fnic, fp); +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Flush queued frames"); ++ ++ list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) { ++ fp = cur_frame->fp; ++ list_del(&cur_frame->links); ++ fnic_send_frame(fnic, fp, cur_frame->frame_len); ++ mempool_free(cur_frame, fnic->frame_elem_pool); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + } + + -int + -fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id, + - void *fp) + +/** + + * fnic_set_eth_mode() - put fnic into ethernet mode. + + * @fnic: fnic device + + * + + * Called without fnic lock held. 
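 + + * It takes fnic_lock itself, drops it around fnic_fw_reset_handler(), and
 + + * restores the previous state if the reset cannot be issued.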
+ + */ + +static void fnic_set_eth_mode(struct fnic *fnic) + { + - struct fnic *fnic = iport->fnic; + - struct ethhdr *ethhdr; + + unsigned long flags; + + enum fnic_state old_state; + int ret; + +++<<<<<<< HEAD + + spin_lock_irqsave(&fnic->fnic_lock, flags); + +again: + + old_state = fnic->state; + + switch (old_state) { + + case FNIC_IN_FC_MODE: + + case FNIC_IN_ETH_TRANS_FC_MODE: + + default: + + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + ret = fnic_fw_reset_handler(fnic); + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) + + goto again; + + if (ret) + + fnic->state = old_state; + + break; + + + + case FNIC_IN_FC_TRANS_ETH_MODE: + + case FNIC_IN_ETH_MODE: + + break; +++======= ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id, ++ fp, fnic->state); ++ ++ if (fp) { ++ ethhdr = (struct ethhdr *) fp; ++ vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest); ++ } ++ ++ /* Change state to reflect transition to FC mode */ ++ if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) ++ fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; ++ else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Unexpected fnic state while processing FLOGI response\n"); ++ return -1; ++ } ++ ++ /* ++ * Send FLOGI registration to firmware to set up FC mode. ++ * The new address will be set up when registration completes. ++ */ ++ ret = fnic_flogi_reg_handler(fnic, port_id); ++ if (ret < 0) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "FLOGI registration error ret: %d fnic state: %d\n", ++ ret, fnic->state); ++ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) ++ fnic->state = FNIC_IN_ETH_MODE; ++ ++ return -1; ++ } ++ iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "FLOGI registration success\n"); ++ return 0; ++ } ++ ++ void fnic_free_txq(struct list_head *head) ++ { ++ struct fnic_frame_list *cur_frame, *next; ++ ++ list_for_each_entry_safe(cur_frame, next, head, links) { ++ list_del(&cur_frame->links); ++ kfree(cur_frame->fp); ++ kfree(cur_frame); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + } + + static void fnic_wq_complete_frame_send(struct vnic_wq *wq, +@@@ -1303,109 -901,173 +1729,260 @@@ void fnic_free_wq_buf(struct vnic_wq *w + buf->os_buf = NULL; + } + + -void + -fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport, + - unsigned long flags) + +void fnic_fcoe_reset_vlans(struct fnic *fnic) + { +++<<<<<<< HEAD +++======= ++ struct fnic *fnic = iport->fnic; ++ struct fc_rport *rport; ++ struct fc_rport_identifiers ids; ++ struct rport_dd_data_s *rdd_data; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Adding rport fcid: 0x%x", tport->fcid); ++ ++ ids.node_name = tport->wwnn; ++ ids.port_name = tport->wwpn; ++ ids.port_id = tport->fcid; ++ ids.roles = FC_RPORT_ROLE_FCP_TARGET; ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ rport = fc_remote_port_add(fnic->host, 0, &ids); ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (!rport) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Failed to add rport for tport: 0x%x", tport->fcid); ++ return; ++ } ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Added rport fcid: 0x%x", tport->fcid); ++ ++ /* Mimic these assignments in queuecommand to avoid timing issues */ ++ 
rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; ++ rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; ++ rdd_data = rport->dd_data; ++ rdd_data->tport = tport; ++ rdd_data->iport = iport; ++ tport->rport = rport; ++ tport->flags |= FNIC_FDLS_SCSI_REGISTERED; ++ } ++ ++ void ++ fnic_fdls_remove_tport(struct fnic_iport_s *iport, ++ struct fnic_tport_s *tport, unsigned long flags) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct rport_dd_data_s *rdd_data; ++ ++ struct fc_rport *rport; ++ ++ if (!tport) ++ return; ++ ++ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE); ++ rport = tport->rport; ++ ++ if (rport) { ++ /* tport resource release will be done ++ * after fnic_terminate_rport_io() ++ */ ++ tport->flags |= FNIC_FDLS_TPORT_DELETED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ /* Interface to scsi_fc_transport */ ++ fc_remote_port_delete(rport); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Deregistered and freed tport fcid: 0x%x from scsi transport fc", ++ tport->fcid); ++ ++ /* ++ * the dd_data is allocated by fc transport ++ * of size dd_fcrport_size ++ */ ++ rdd_data = rport->dd_data; ++ rdd_data->tport = NULL; ++ rdd_data->iport = NULL; ++ list_del(&tport->links); ++ kfree(tport); ++ } else { ++ fnic_del_tport_timer_sync(fnic, tport); ++ list_del(&tport->links); ++ kfree(tport); ++ } ++ } ++ ++ void fnic_delete_fcp_tports(struct fnic *fnic) ++ { ++ struct fnic_tport_s *tport, *next; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + unsigned long flags; + + struct fcoe_vlan *vlan; + + struct fcoe_vlan *next; + +++<<<<<<< HEAD + + /* + + * indicate a link down to fcoe so that all fcf's are free'd + + * might not be required since we did this before sending vlan + + * discovery request + + */ + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (!list_empty(&fnic->vlans)) { + + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + + list_del(&vlan->list); + + kfree(vlan); +++======= ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "removing fcp rport fcid: 0x%x", tport->fcid); ++ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); ++ fnic_del_tport_timer_sync(fnic, tport); ++ fnic_fdls_remove_tport(&fnic->iport, tport, flags); ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } ++ ++ /** ++ * fnic_tport_event_handler() - Handler for remote port events ++ * in the tport_event_queue. ++ * ++ * @work: Handle to the remote port being dequeued ++ */ ++ void fnic_tport_event_handler(struct work_struct *work) ++ { ++ struct fnic *fnic = container_of(work, struct fnic, tport_work); ++ struct fnic_tport_event_s *cur_evt, *next; ++ unsigned long flags; ++ struct fnic_tport_s *tport; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { ++ tport = cur_evt->arg1; ++ switch (cur_evt->event) { ++ case TGT_EV_RPORT_ADD: ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Add rport event"); ++ if (tport->state == FDLS_TGT_STATE_READY) { ++ fnic_fdls_add_tport(&fnic->iport, ++ (struct fnic_tport_s *) cur_evt->arg1, flags); ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Target not ready. 
Add rport event dropped: 0x%x", ++ tport->fcid); ++ } ++ break; ++ case TGT_EV_RPORT_DEL: ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Remove rport event"); ++ if (tport->state == FDLS_TGT_STATE_OFFLINING) { ++ fnic_fdls_remove_tport(&fnic->iport, ++ (struct fnic_tport_s *) cur_evt->arg1, flags); ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "remove rport event dropped tport fcid: 0x%x", ++ tport->fcid); ++ } ++ break; ++ case TGT_EV_TPORT_DELETE: ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Delete tport event"); ++ fdls_delete_tport(tport->iport, tport); ++ break; ++ default: ++ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Unknown tport event"); ++ break; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + - list_del(&cur_evt->links); + - kfree(cur_evt); + } + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + } + + -void fnic_flush_tport_event_list(struct fnic *fnic) + +void fnic_handle_fip_timer(struct fnic *fnic) + { + - struct fnic_tport_event_s *cur_evt, *next; + unsigned long flags; + + struct fcoe_vlan *vlan; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + u64 sol_time; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + - list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { + - list_del(&cur_evt->links); + - kfree(cur_evt); + + if (fnic->stop_rx_link_events) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + if (fnic->ctlr.mode == FIP_MODE_NON_FIP) + + return; + + + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (list_empty(&fnic->vlans)) { + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* no vlans available, try again */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fip_timer: vlan %d state %d sol_count %d\n", + + vlan->vid, vlan->state, vlan->sol_count); + + switch (vlan->state) { + + case FIP_VLAN_USED: + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "FIP VLAN is selected for FC transaction\n"); + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + break; + + case FIP_VLAN_FAILED: + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* if all vlans are in failed state, restart vlan disc */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + break; + + case FIP_VLAN_SENT: + + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + + /* + + * no response on this vlan, remove from the list. 
+ + * Try the next vlan + + */ + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "Dequeue this VLAN ID %d from list\n", + + vlan->vid); + + list_del(&vlan->list); + + kfree(vlan); + + vlan = NULL; + + if (list_empty(&fnic->vlans)) { + + /* we exhausted all vlans, restart vlan disc */ + + spin_unlock_irqrestore(&fnic->vlans_lock, + + flags); + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "fip_timer: vlan list empty, " + + "trigger vlan disc\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + /* check the next vlan */ + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + + list); + + fnic->set_vlan(fnic, vlan->vid); + + vlan->state = FIP_VLAN_SENT; /* sent now */ + + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + + vlan->sol_count++; + + sol_time = jiffies + msecs_to_jiffies + + (FCOE_CTLR_START_DELAY); + + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + + break; + + } + } +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,fe8816feb247..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -175,9 -171,13 +174,17 @@@ static struct fc_function_template fnic + + static void fnic_get_host_speed(struct Scsi_Host *shost) + { + - struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + struct fc_lport *lp = shost_priv(shost); + + struct fnic *fnic = lport_priv(lp); + u32 port_speed = vnic_dev_port_speed(fnic->vdev); +++<<<<<<< HEAD +++======= ++ struct fnic_stats *fnic_stats = &fnic->fnic_stats; ++ ++ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "port_speed: %d Mbps", port_speed); ++ atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + /* Add in other values as they get defined in fw */ + switch (port_speed) { +@@@ -197,7 -215,18 +204,12 @@@ + case DCEM_PORTSPEED_100G: + fc_host_speed(shost) = FC_PORTSPEED_100GBIT; + break; + - case DCEM_PORTSPEED_128G: + - fc_host_speed(shost) = FC_PORTSPEED_128GBIT; + - break; + default: +++<<<<<<< HEAD +++======= ++ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Unknown FC speed: %d Mbps", port_speed); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; + break; + } +@@@ -221,9 -251,8 +233,14 @@@ static struct fc_host_statistics *fnic_ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { +++<<<<<<< HEAD + + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic: Get vnic stats failed" + + " 0x%x", ret); +++======= ++ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "fnic: Get vnic stats failed: 0x%x", ret); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return stats; + } + vs = fnic->stats; +@@@ -333,7 -360,7 +350,11 @@@ static void fnic_reset_host_stats(struc + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { +++<<<<<<< HEAD + + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "fnic: Reset vnic stats failed" + " 0x%x", ret); + return; +@@@ -558,9 -562,23 +579,29 @@@ static void fnic_set_vlan(struct fnic * + vnic_dev_set_default_vlan(fnic->vdev, vlan_id); + } + +++<<<<<<< HEAD + +static int fnic_scsi_drv_init(struct fnic *fnic) + +{ + + struct Scsi_Host *host = fnic->lport->host; +++======= ++ static void fnic_scsi_init(struct fnic *fnic) ++ { ++ struct Scsi_Host *host = fnic->host; ++ ++ 
snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, ++ host->host_no); ++ ++ host->transportt = fnic_fc_transport; ++ } ++ ++ static int fnic_scsi_drv_init(struct fnic *fnic) ++ { ++ struct Scsi_Host *host = fnic->host; ++ int err; ++ struct pci_dev *pdev = fnic->pdev; ++ struct fnic_iport_s *iport = &fnic->iport; ++ int hwq; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + /* Configure maximum outstanding IO reqs*/ + if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) +@@@ -571,32 -589,100 +612,113 @@@ + fnic->fnic_max_tag_id = host->can_queue; + host->max_lun = fnic->config.luns_per_tgt; + host->max_id = FNIC_MAX_FCP_TARGET; + - host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN; + + host->max_cmd_len = FCOE_MAX_CMD_LEN; + + host->nr_hw_queues = fnic->wq_copy_count; + + if (host->nr_hw_queues > 1) + + shost_printk(KERN_ERR, host, + + "fnic: blk-mq is not supported"); + + - dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", + + host->nr_hw_queues = fnic->wq_copy_count = 1; + + + + shost_printk(KERN_INFO, host, + + "fnic: can_queue: %d max_lun: %llu", + host->can_queue, host->max_lun); + + - dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + + shost_printk(KERN_INFO, host, + + "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + host->max_id, host->max_cmd_len, host->nr_hw_queues); + +++<<<<<<< HEAD +++ return 0; +++} +++ +++======= ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { ++ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; ++ fnic->sw_copy_wq[hwq].io_req_table = ++ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * ++ sizeof(struct fnic_io_req *), GFP_KERNEL); ++ } ++ ++ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", ++ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); ++ ++ fnic_scsi_init(fnic); ++ ++ err = scsi_add_host(fnic->host, &pdev->dev); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n"); ++ return -1; ++ } ++ fc_host_maxframe_size(fnic->host) = iport->max_payload_size; ++ fc_host_dev_loss_tmo(fnic->host) = ++ fnic->config.port_down_timeout / 1000; ++ sprintf(fc_host_symbolic_name(fnic->host), ++ DRV_NAME " v" DRV_VERSION " over %s", fnic->name); ++ fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT; ++ fc_host_node_name(fnic->host) = iport->wwnn; ++ fc_host_port_name(fnic->host) = iport->wwpn; ++ fc_host_supported_classes(fnic->host) = FC_COS_CLASS3; ++ memset(fc_host_supported_fc4s(fnic->host), 0, ++ sizeof(fc_host_supported_fc4s(fnic->host))); ++ fc_host_supported_fc4s(fnic->host)[2] = 1; ++ fc_host_supported_fc4s(fnic->host)[7] = 1; ++ fc_host_supported_speeds(fnic->host) = 0; ++ fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT; ++ ++ dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data); ++ if (fnic->host->shost_data != NULL) { ++ if (fnic_tgt_id_binding == 0) { ++ dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n"); ++ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE; ++ } else { ++ dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n"); ++ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN; ++ } ++ } ++ ++ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); ++ if (!fnic->io_req_pool) { ++ scsi_remove_host(fnic->host); ++ return -ENOMEM; ++ } ++ + return 0; + } + ++ void fnic_mq_map_queues_cpus(struct Scsi_Host *host) ++ { ++ struct fnic *fnic = *((struct fnic **) shost_priv(host)); ++ struct pci_dev *l_pdev = fnic->pdev; 
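 ++ /* used below to map blk-mq hw queues from the PCI device's MSI-X layout */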
++ int intr_mode = fnic->config.intr_mode; ++ struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT]; ++ ++ if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) { ++ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "intr_mode is not msix\n"); ++ return; ++ } ++ ++ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "qmap->nr_queues: %d\n", qmap->nr_queues); ++ ++ if (l_pdev == NULL) { ++ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "l_pdev is null\n"); ++ return; ++ } ++ ++ blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET); ++ } ++ +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { + - struct Scsi_Host *host = NULL; + + struct Scsi_Host *host; + + struct fc_lport *lp; + struct fnic *fnic; + mempool_t *pool; + - struct fnic_iport_s *iport; + int err = 0; + int fnic_id = 0; + int i; +@@@ -734,13 -813,38 +856,43 @@@ + /* Get vNIC configuration */ + err = fnic_get_vnic_config(fnic); + if (err) { + - dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, " + + shost_printk(KERN_ERR, fnic->lport->host, + + "Get vNIC configuration failed, " + "aborting.\n"); + - goto err_out_fnic_get_config; + + goto err_out_dev_close; + } + +++<<<<<<< HEAD + + fnic_scsi_drv_init(fnic); +++======= ++ switch (fnic->config.flags & 0xff0) { ++ case VFCF_FC_INITIATOR: ++ { ++ host = ++ scsi_host_alloc(&fnic_host_template, ++ sizeof(struct fnic *)); ++ if (!host) { ++ dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n"); ++ err = -ENOMEM; ++ goto err_out_scsi_host_alloc; ++ } ++ *((struct fnic **) shost_priv(host)) = fnic; ++ ++ fnic->host = host; ++ fnic->role = FNIC_ROLE_FCP_INITIATOR; ++ dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n", ++ fnic->fnic_num); ++ } ++ break; ++ default: ++ dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num); ++ err = -EINVAL; ++ goto err_out_fnic_role; ++ } ++ ++ /* Setup PCI resources */ ++ pci_set_drvdata(pdev, fnic); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + fnic_get_res_counts(fnic); + +@@@ -941,42 -1039,56 +1093,66 @@@ + + return 0; + +++<<<<<<< HEAD + +err_out_free_exch_mgr: + + fc_exch_mgr_free(lp); + +err_out_remove_scsi_host: + + fc_remove_host(lp->host); + + scsi_remove_host(lp->host); + +err_out_free_rq_buf: + + for (i = 0; i < fnic->rq_count; i++) +++======= ++ err_out_free_stats_debugfs: ++ fnic_stats_debugfs_remove(fnic); ++ scsi_remove_host(fnic->host); ++ err_out_scsi_drv_init: ++ fnic_free_intr(fnic); ++ err_out_fnic_request_intr: ++ err_out_alloc_rq_buf: ++ for (i = 0; i < fnic->rq_count; i++) { ++ if (ioread32(&fnic->rq[i].ctrl->enable)) ++ vnic_rq_disable(&fnic->rq[i]); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + - } + vnic_dev_notify_unset(fnic->vdev); + -err_out_fnic_notify_set: + - mempool_destroy(fnic->frame_elem_pool); + -err_out_fdls_frame_elem_pool: + - mempool_destroy(fnic->frame_pool); + -err_out_fdls_frame_pool: + +err_out_free_max_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); + err_out_free_dflt_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); + +err_out_free_ioreq_pool: + + mempool_destroy(fnic->io_req_pool); + err_out_free_resources: + fnic_free_vnic_resources(fnic); + -err_out_fnic_alloc_vnic_res: + +err_out_clear_intr: + fnic_clear_intr_mode(fnic); +++<<<<<<< HEAD + +err_out_dev_close: +++======= ++ err_out_fnic_set_intr_mode: ++ if 
(IS_FNIC_FCP_INITIATOR(fnic)) ++ scsi_host_put(fnic->host); ++ err_out_fnic_role: ++ err_out_scsi_host_alloc: ++ err_out_fnic_get_config: ++ err_out_dev_mac_addr: ++ err_out_dev_init: +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + vnic_dev_close(fnic->vdev); + -err_out_dev_open: + -err_out_dev_cmd_init: + +err_out_dev_cmd_deinit: + +err_out_vnic_unregister: + vnic_dev_unregister(fnic->vdev); + -err_out_dev_register: + +err_out_iounmap: + fnic_iounmap(fnic); + -err_out_fnic_map_bar: + -err_out_map_bar: + -err_out_set_dma_mask: + +err_out_release_regions: + pci_release_regions(pdev); + -err_out_pci_request_regions: + +err_out_disable_device: + pci_disable_device(pdev); + -err_out_pci_enable_device: + +err_out_free_hba: + + fnic_stats_debugfs_remove(fnic); + ida_free(&fnic_ida, fnic->fnic_num); + err_out_ida_alloc: + - kfree(fnic); + -err_out_fnic_alloc: + + scsi_host_put(lp->host); + +err_out: + return err; + } + +@@@ -1055,8 -1159,13 +1231,16 @@@ static void fnic_remove(struct pci_dev + fnic_iounmap(fnic); + pci_release_regions(pdev); + pci_disable_device(pdev); + - pci_set_drvdata(pdev, NULL); + ida_free(&fnic_ida, fnic->fnic_num); +++<<<<<<< HEAD + + scsi_host_put(lp->host); +++======= ++ if (IS_FNIC_FCP_INITIATOR(fnic)) { ++ fnic_scsi_unload_cleanup(fnic); ++ scsi_host_put(fnic->host); ++ } ++ kfree(fnic); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + static struct pci_driver fnic_driver = { +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 321954ca143f,7133b254cbe4..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -35,8 -23,8 +35,7 @@@ + #include + #include + #include +- #include + #include + -#include + #include "fnic_io.h" + #include "fnic.h" + +@@@ -140,11 -126,70 +139,73 @@@ static void fnic_release_ioreq_buf(stru + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + } + +++<<<<<<< HEAD +++======= ++ static bool ++ fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2) ++ { ++ u32 *portid = data1; ++ unsigned int *count = data2; ++ struct fnic_io_req *io_req = fnic_priv(sc)->io_req; ++ ++ if (!io_req || (*portid && (io_req->port_id != *portid))) ++ return true; ++ ++ *count += 1; ++ return true; ++ } ++ ++ unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid) ++ { ++ unsigned int count = 0; ++ ++ fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter, ++ &portid, &count); ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "portid = 0x%x count = %u\n", portid, count); ++ return count; ++ } ++ ++ unsigned int fnic_count_all_ioreqs(struct fnic *fnic) ++ { ++ return fnic_count_ioreqs(fnic, 0); ++ } ++ ++ static bool ++ fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2) ++ { ++ struct scsi_device *scsi_device = data1; ++ unsigned int *count = data2; ++ ++ if (sc->device != scsi_device || !fnic_priv(sc)->io_req) ++ return true; ++ ++ *count += 1; ++ return true; ++ } ++ ++ unsigned int ++ fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device) ++ { ++ unsigned int count = 0; ++ ++ fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter, ++ scsi_device, &count); ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "lun = %p count = %u\n", scsi_device, count); ++ return count; ++ } ++ +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + /* Free up Copy Wq descriptors. 
Called with copy_wq lock held */ + -static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq) + +static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) + { + /* if no Ack received from firmware, then nothing to clean */ + - if (!fnic->fw_ack_recd[hwq]) + + if (!fnic->fw_ack_recd[0]) + return 1; + + /* +@@@ -227,6 -268,8 +288,11 @@@ int fnic_fw_reset_handler(struct fnic * + if (!vnic_wq_copy_desc_avail(wq)) + ret = -EAGAIN; + else { +++<<<<<<< HEAD +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "ioreq_count: %u\n", ioreq_count); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > +@@@ -240,12 -283,12 +306,21 @@@ + + if (!ret) { + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Issued fw reset\n"); + + } else { + + fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Failed to issue fw reset\n"); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Issued fw reset\n"); ++ } else { ++ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "Failed to issue fw reset\n"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + return ret; +@@@ -275,28 -318,23 +350,42 @@@ int fnic_flogi_reg_handler(struct fnic + goto flogi_reg_ioreq_end; + } + + - memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN); + - format = FCPIO_FLOGI_REG_GW_DEST; + + if (fnic->ctlr.map_dest) { + + eth_broadcast_addr(gw_mac); + + format = FCPIO_FLOGI_REG_DEF_DEST; + + } else { + + memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); + + format = FCPIO_FLOGI_REG_GW_DEST; + + } + + - if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { + fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, + fc_id, gw_mac, +++<<<<<<< HEAD + + fnic->data_src_addr, + + lp->r_a_tov, lp->e_d_tov); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", + + fc_id, fnic->data_src_addr, gw_mac); + + } else { + + fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, + + format, fc_id, gw_mac); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "FLOGI reg issued fcid %x map %d dest %pM\n", + + fc_id, fnic->ctlr.map_dest, gw_mac); +++======= ++ fnic->iport.fpma, ++ iport->r_a_tov, iport->e_d_tov); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n", ++ fc_id, fnic->iport.fpma, gw_mac); ++ } else { ++ fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, ++ format, fc_id, gw_mac); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "FLOGI reg issued fcid 0x%x dest %p\n", ++ fc_id, gw_mac); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); +@@@ -366,14 -409,11 +455,18 @@@ static inline int fnic_queue_wq_copy_de + int_to_scsilun(sc->device->lun, &fc_lun); + + /* Enqueue the descriptor in the Copy WQ */ + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if 
(unlikely(!vnic_wq_copy_desc_avail(wq))) { +++<<<<<<< HEAD + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "fnic_queue_wq_copy_desc failure - no descriptors\n"); + atomic64_inc(&misc_stats->io_cpwq_alloc_failures); + return SCSI_MLQUEUE_HOST_BUSY; +@@@ -446,7 -478,7 +539,11 @@@ static int fnic_queuecommand_lck(struc + + rport = starget_to_rport(scsi_target(sc->device)); + if (!rport) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "returning DID_NO_CONNECT for IO as rport is NULL\n"); + sc->result = DID_NO_CONNECT << 16; + done(sc); +@@@ -455,49 -487,98 +552,131 @@@ + + ret = fc_remote_port_chkready(rport); + if (ret) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "rport is not ready\n"); + - atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); + + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = ret; + done(sc); + return 0; + } + + - mqtag = blk_mq_unique_tag(rq); + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - iport = &fnic->iport; + + rp = rport->dd_data; + + if (!rp || rp->rp_state == RPORT_ST_DELETE) { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "rport 0x%x removed, returning DID_NO_CONNECT\n", + + rport->port_id); + +++<<<<<<< HEAD + + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + + sc->result = DID_NO_CONNECT<<16; +++======= ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "returning DID_NO_CONNECT for IO as iport state: %d\n", ++ iport->state); ++ sc->result = DID_NO_CONNECT << 16; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + done(sc); + return 0; + } + +++<<<<<<< HEAD + + if (rp->rp_state != RPORT_ST_READY) { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", + + rport->port_id, rp->rp_state); + + + + sc->result = DID_IMM_RETRY << 16; +++======= ++ /* fc_remote_port_add() may have added the tport to ++ * fc_transport but dd_data not yet set ++ */ ++ rdd_data = rport->dd_data; ++ tport = rdd_data->tport; ++ if (!tport || (rdd_data->iport != iport)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "dd_data not yet set in SCSI for rport portid: 0x%x\n", ++ rport->port_id); ++ tport = fnic_find_tport_by_fcid(iport, rport->port_id); ++ if (!tport) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n", ++ rport->port_id); ++ sc->result = DID_BUS_BUSY << 16; ++ done(sc); ++ return 0; ++ } ++ ++ /* Re-assign same params as in fnic_fdls_add_tport */ ++ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; ++ rport->supported_classes = ++ FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; ++ /* the dd_data is allocated by fctransport of size dd_fcrport_size */ ++ rdd_data = rport->dd_data; ++ rdd_data->tport = tport; ++ rdd_data->iport = iport; ++ tport->rport = rport; ++ tport->flags |= FNIC_FDLS_SCSI_REGISTERED; ++ } ++ ++ if ((tport->state != FDLS_TGT_STATE_READY) ++ && (tport->state != 
FDLS_TGT_STATE_ADISC)) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "returning DID_NO_CONNECT for IO as tport state: %d\n", ++ tport->state); ++ sc->result = DID_NO_CONNECT << 16; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + done(sc); + return 0; + } + + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + + return SCSI_MLQUEUE_HOST_BUSY; + + + atomic_inc(&fnic->in_flight); + - atomic_inc(&tport->in_flight); + +++<<<<<<< HEAD + + /* + + * Release host lock, use driver resource specific locks from here. + + * Don't re-enable interrupts in case they were disabled prior to the + + * caller disabling them. + + */ + + spin_unlock(lp->host->host_lock); + + CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED; + + CMD_FLAGS(sc) = FNIC_NO_FLAGS; +++======= ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { ++ atomic_dec(&fnic->in_flight); ++ atomic_dec(&tport->in_flight); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } ++ ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", ++ fnic->state_flags); ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } ++ ++ if (!tport->lun0_delay) { ++ lun0_delay = 1; ++ tport->lun0_delay++; ++ } ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; ++ fnic_priv(sc)->flags = FNIC_NO_FLAGS; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + /* Get a new io_req for this SCSI IO */ + io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); +@@@ -608,11 -700,17 +787,22 @@@ out + + /* if only we issued IO, will we have the io lock */ + if (io_lock_acquired) + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + atomic_dec(&fnic->in_flight); +++<<<<<<< HEAD + + /* acquire host lock before returning to SCSI */ + + spin_lock(lp->host->host_lock); +++======= ++ atomic_dec(&tport->in_flight); ++ ++ if (lun0_delay) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "LUN0 delay\n"); ++ mdelay(LUN0_DELAY_TIME); ++ } ++ +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return ret; + } + +@@@ -649,31 -746,23 +839,48 @@@ static int fnic_fcpio_fw_reset_cmpl_han + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { + /* Check status of reset completion */ + if (!hdr_status) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "reset cmpl success\n"); + + /* Ready to send flogi out */ + + fnic->state = FNIC_IN_ETH_MODE; + + } else { + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "fnic fw_reset : failed %s\n", + + fnic_fcpio_status_to_str(hdr_status)); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "reset cmpl success\n"); ++ /* Ready to send flogi out */ ++ fnic->state = FNIC_IN_ETH_MODE; ++ } else { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "reset failed with header status: %s\n", ++ fnic_fcpio_status_to_str(hdr_status)); + - +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + + + /* + + * Unable to change to eth mode, cannot send out flogi + + * Change state to fc mode, so that subsequent Flogi + + * requests from libFC will cause more attempts to + + * reset the firmware. 
Free the cached flogi + + */ + fnic->state = FNIC_IN_FC_MODE; + atomic64_inc(&reset_stats->fw_reset_failures); + ret = -1; + } + } else { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "Unexpected state %s while processing" + + " reset cmpl\n", fnic_state_to_str(fnic->state)); +++======= ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "Unexpected state while processing reset completion: %s\n", ++ fnic_state_to_str(fnic->state)); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + atomic64_inc(&reset_stats->fw_reset_failures); + ret = -1; + } +@@@ -724,19 -812,19 +931,33 @@@ static int fnic_fcpio_flogi_reg_cmpl_ha + + /* Check flogi registration completion status */ + if (!hdr_status) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "flog reg succeeded\n"); + + fnic->state = FNIC_IN_FC_MODE; + + } else { + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "fnic flogi reg :failed %s\n", +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "FLOGI reg succeeded\n"); ++ fnic->state = FNIC_IN_FC_MODE; ++ } else { ++ FNIC_SCSI_DBG(KERN_DEBUG, ++ fnic->host, fnic->fnic_num, ++ "fnic flogi reg failed: %s\n", +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + fnic_fcpio_status_to_str(hdr_status)); + fnic->state = FNIC_IN_ETH_MODE; + ret = -1; + } + } else { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "Unexpected fnic state %s while" + " processing flogi reg completion\n", + fnic_state_to_str(fnic->state)); +@@@ -806,9 -895,9 +1027,9 @@@ static inline void fnic_fcpio_ack_handl + atomic64_inc( + &fnic->fnic_stats.misc_stats.ack_index_out_of_range); + + - spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + FNIC_TRACE(fnic_fcpio_ack_handler, +- fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], ++ fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], + ox_id_tag[4], ox_id_tag[5]); + } + +@@@ -833,24 -921,45 +1054,52 @@@ static void fnic_fcpio_icmnd_cmpl_handl + u64 cmd_trace; + unsigned long start_time; + unsigned long io_duration_time; + - unsigned int hwq = 0; + - unsigned int mqtag = 0; + - unsigned int tag = 0; + + /* Decode the cmpl description to get the io_req id */ + - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + - fcpio_tag_id_dec(&ftag, &id); + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + fcpio_tag_id_dec(&tag, &id); + icmnd_cmpl = &desc->u.icmnd_cmpl; + +++<<<<<<< HEAD + + if (id >= fnic->fnic_max_tag_id) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "Tag out of range tag %x hdr status = %s\n", + + id, fnic_fcpio_status_to_str(hdr_status)); +++======= ++ mqtag = id; ++ tag = blk_mq_unique_tag_to_tag(mqtag); ++ hwq = blk_mq_unique_tag_to_hwq(mqtag); ++ ++ if (hwq != cq_index) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hdr status: %s icmnd completion on the wrong queue\n", ++ fnic_fcpio_status_to_str(hdr_status)); ++ } ++ ++ if (tag >= fnic->fnic_max_tag_id) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hdr status: %s Out of range tag\n", ++ 
fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return; + } + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + +- sc = scsi_host_find_tag(fnic->lport->host, id); ++ sc = scsi_host_find_tag(fnic->host, id); + WARN_ON_ONCE(!sc); + if (!sc) { + atomic64_inc(&fnic_stats->io_stats.sc_null); +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ shost_printk(KERN_ERR, fnic->host, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "icmnd_cmpl sc is null - " + "hdr status = %s tag = 0x%x desc = 0x%p\n", + fnic_fcpio_status_to_str(hdr_status), id, desc); +@@@ -871,9 -985,9 +1120,15 @@@ + WARN_ON_ONCE(!io_req); + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); +++<<<<<<< HEAD + + CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; + + spin_unlock_irqrestore(io_lock, flags); + + shost_printk(KERN_ERR, fnic->lport->host, +++======= ++ fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ shost_printk(KERN_ERR, fnic->host, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "icmnd_cmpl io_req is null - " + "hdr status = %s tag = 0x%x sc 0x%p\n", + fnic_fcpio_status_to_str(hdr_status), id, sc); +@@@ -894,13 -1008,13 +1149,17 @@@ + * set the FNIC_IO_DONE so that this doesn't get + * flagged as 'out of order' if it was not aborted + */ + - fnic_priv(sc)->flags |= FNIC_IO_DONE; + - fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_FLAGS(sc) |= FNIC_IO_DONE; + + CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING; + + spin_unlock_irqrestore(io_lock, flags); + if(FCPIO_ABORTED == hdr_status) + - fnic_priv(sc)->flags |= FNIC_IO_ABORTED; + + CMD_FLAGS(sc) |= FNIC_IO_ABORTED; + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "icmnd_cmpl abts pending " + "hdr status = %s tag = 0x%x sc = 0x%p " + "scsi_status = %x residual = %d\n", +@@@ -931,6 -1045,9 +1190,12 @@@ + + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) + atomic64_inc(&fnic_stats->misc_stats.queue_fulls); +++<<<<<<< HEAD +++======= ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "xfer_len: %llu", xfer_len); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + break; + + case FCPIO_TIMEOUT: /* request was timed out */ +@@@ -1071,40 -1189,78 +1336,98 @@@ static void fnic_fcpio_itmf_cmpl_handle + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + unsigned long flags; + + spinlock_t *io_lock; + unsigned long start_time; + - unsigned int hwq = cq_index; + - unsigned int mqtag; + - unsigned int tag; + + - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + - fcpio_tag_id_dec(&ftag, &id); + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + fcpio_tag_id_dec(&tag, &id); + +++<<<<<<< HEAD + + if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "Tag out of range tag %x hdr status = %s\n", + + id, fnic_fcpio_status_to_str(hdr_status)); + + return; + + } + + + + sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + + WARN_ON_ONCE(!sc); + + if (!sc) { + + atomic64_inc(&fnic_stats->io_stats.sc_null); + + shost_printk(KERN_ERR, fnic->lport->host, +++======= ++ mqtag = id & FNIC_TAG_MASK; ++ tag = 
blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK); ++ hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK); ++ ++ if (hwq != cq_index) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hdr status: %s ITMF completion on the wrong queue\n", ++ fnic_fcpio_status_to_str(hdr_status)); ++ } ++ ++ if (tag > fnic->fnic_max_tag_id) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hdr status: %s Tag out of range\n", ++ fnic_fcpio_status_to_str(hdr_status)); ++ return; ++ } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hdr status: %s Tag out of range\n", ++ fnic_fcpio_status_to_str(hdr_status)); ++ return; ++ } ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ ++ /* If it is sg3utils allocated SC then tag_id ++ * is max_tag_id and SC is retrieved from io_req ++ */ ++ if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { ++ io_req = fnic->sw_copy_wq[hwq].io_req_table[tag]; ++ if (io_req) ++ sc = io_req->sc; ++ } else { ++ sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK); ++ } ++ ++ WARN_ON_ONCE(!sc); ++ if (!sc) { ++ atomic64_inc(&fnic_stats->io_stats.sc_null); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ shost_printk(KERN_ERR, fnic->host, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", + - fnic_fcpio_status_to_str(hdr_status), tag); + + fnic_fcpio_status_to_str(hdr_status), id); + return; + } + - + - io_req = fnic_priv(sc)->io_req; + + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + WARN_ON_ONCE(!io_req); + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; + + shost_printk(KERN_ERR, fnic->lport->host, +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; ++ shost_printk(KERN_ERR, fnic->host, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "itmf_cmpl io_req is null - " + "hdr status = %s tag = 0x%x sc 0x%p\n", + - fnic_fcpio_status_to_str(hdr_status), tag, sc); + + fnic_fcpio_status_to_str(hdr_status), id, sc); + return; + } + start_time = io_req->start_time; +@@@ -1112,17 -1268,22 +1435,34 @@@ + if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { + /* Abort and terminate completion of device reset req */ + /* REVISIT : Add asserts about various flags */ +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "dev reset abts cmpl recd. 
id %x status %s\n", + + id, fnic_fcpio_status_to_str(hdr_status)); + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + + CMD_ABTS_STATUS(sc) = hdr_status; + + CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n", ++ hwq, mqtag, tag, ++ fnic_fcpio_status_to_str(hdr_status)); ++ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; ++ fnic_priv(sc)->abts_status = hdr_status; ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + if (io_req->abts_done) + complete(io_req->abts_done); + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + } else if (id & FNIC_TAG_ABORT) { + /* Completion of abort cmd */ +++<<<<<<< HEAD +++======= ++ shost_printk(KERN_DEBUG, fnic->host, ++ "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n", ++ hwq, mqtag, tag, ++ fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + switch (hdr_status) { + case FCPIO_SUCCESS: + break; +@@@ -1134,7 -1295,7 +1474,11 @@@ + &term_stats->terminate_fw_timeouts); + break; + case FCPIO_ITMF_REJECTED: +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "abort reject recd. id %d\n", + (int)(id & FNIC_TAG_MASK)); + break; +@@@ -1164,12 -1325,12 +1508,16 @@@ + + /* If the status is IO not found consider it as success */ + if (hdr_status == FCPIO_IO_NOT_FOUND) + - fnic_priv(sc)->abts_status = FCPIO_SUCCESS; + + CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; + + - if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "abts cmpl recd. 
id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); +@@@ -1181,87 -1342,89 +1529,140 @@@ + */ + if (io_req->abts_done) { + complete(io_req->abts_done); +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); + + } else { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "abts cmpl, completing IO\n"); + + CMD_SP(sc) = NULL; +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ shost_printk(KERN_INFO, fnic->host, ++ "hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n", ++ hwq, mqtag, tag); ++ } else { ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); ++ fnic_priv(sc)->io_req = NULL; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + sc->result = (DID_ERROR << 16); + - fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + + + spin_unlock_irqrestore(io_lock, flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + - FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + - sc->device->host->host_no, id, + - sc, + - jiffies_to_msecs(jiffies - start_time), + - desc, + - (((u64)hdr_status << 40) | + - (u64)sc->cmnd[0] << 32 | + - (u64)sc->cmnd[2] << 24 | + - (u64)sc->cmnd[3] << 16 | + - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + - scsi_done(sc); + - atomic64_dec(&fnic_stats->io_stats.active_ios); + - if (atomic64_read(&fnic->io_cmpl_skip)) + - atomic64_dec(&fnic->io_cmpl_skip); + - else + - atomic64_inc(&fnic_stats->io_stats.io_completions); + + if (sc->scsi_done) { + + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + + sc->device->host->host_no, id, + + sc, + + jiffies_to_msecs(jiffies - start_time), + + desc, + + (((u64)hdr_status << 40) | + + (u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | + + (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | + + CMD_STATE(sc))); + + sc->scsi_done(sc); + + atomic64_dec(&fnic_stats->io_stats.active_ios); + + if (atomic64_read(&fnic->io_cmpl_skip)) + + atomic64_dec(&fnic->io_cmpl_skip); + + else + + atomic64_inc(&fnic_stats->io_stats.io_completions); + + } + } + + + } else if (id & FNIC_TAG_DEV_RST) { + /* Completion of device reset */ +++<<<<<<< HEAD + + CMD_LR_STATUS(sc) = hdr_status; + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING; + + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + + sc->device->host->host_no, id, sc, + + jiffies_to_msecs(jiffies - start_time), + + desc, 0, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Terminate pending " + + "dev reset cmpl recd. 
id %d status %s\n", + + (int)(id & FNIC_TAG_MASK), + + fnic_fcpio_status_to_str(hdr_status)); +++======= ++ shost_printk(KERN_INFO, fnic->host, ++ "hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); ++ fnic_priv(sc)->lr_status = hdr_status; ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; ++ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, ++ sc->device->host->host_no, id, sc, ++ jiffies_to_msecs(jiffies - start_time), ++ desc, 0, fnic_flags_and_state(sc)); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return; + } + - if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { + + if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) { + /* Need to wait for terminate completion */ + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), +++<<<<<<< HEAD + + desc, 0, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ desc, 0, fnic_flags_and_state(sc)); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "dev reset cmpl recd after time out. " + "id %d status %s\n", + (int)(id & FNIC_TAG_MASK), + fnic_fcpio_status_to_str(hdr_status)); + return; + } +++<<<<<<< HEAD + + CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; + + CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "dev reset cmpl recd. 
id %d status %s\n", + + (int)(id & FNIC_TAG_MASK), + + fnic_fcpio_status_to_str(hdr_status)); +++======= ++ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n", ++ hwq, mqtag, ++ tag, fnic_fcpio_status_to_str(hdr_status)); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + if (io_req->dr_done) + complete(io_req->dr_done); + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + } else { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "Unexpected itmf io state %s tag %x\n", + + fnic_ioreq_state_to_str(CMD_STATE(sc)), id); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ shost_printk(KERN_ERR, fnic->host, ++ "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n", ++ __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + } +@@@ -1311,7 -1476,7 +1712,11 @@@ static int fnic_fcpio_cmpl_handler(stru + break; + + default: +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "firmware completion type %d\n", + desc->hdr.type); + break; +@@@ -1351,20 -1516,35 +1756,38 @@@ int fnic_wq_copy_cmpl_handler(struct fn + + static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) + { + - struct request *const rq = scsi_cmd_to_rq(sc); + + const int tag = scsi_cmd_to_rq(sc)->tag; + struct fnic *fnic = data; + struct fnic_io_req *io_req; + + unsigned long flags = 0; + + spinlock_t *io_lock; + unsigned long start_time = 0; + - unsigned long flags; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + - uint16_t hwq = 0; + - int tag; + - int mqtag; + + - mqtag = blk_mq_unique_tag(rq); + - hwq = blk_mq_unique_tag_to_hwq(mqtag); + - tag = blk_mq_unique_tag_to_tag(mqtag); + + io_lock = fnic_io_lock_tag(fnic, tag); + + spin_lock_irqsave(io_lock, flags); + +++<<<<<<< HEAD + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ ++ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; ++ ++ io_req = fnic_priv(sc)->io_req; ++ if (!io_req) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n", ++ hwq, mqtag, tag, fnic_priv(sc)->flags); ++ return true; ++ } ++ ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + /* + * We will be here only when FW completes reset + * without sending completions for outstanding ios. 
+@@@ -1397,11 -1574,10 +1820,17 @@@ + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + + +cleanup_scsi_cmd: + sc->result = DID_TRANSPORT_DISRUPTED << 16; +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", + + tag, sc, jiffies - start_time); +++======= ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", ++ mqtag, tag, sc, (jiffies - start_time)); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); +@@@ -1429,10 -1599,46 +1858,51 @@@ + return true; + } + + -static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) + +static void fnic_cleanup_io(struct fnic *fnic) + { +++<<<<<<< HEAD + + scsi_host_busy_iter(fnic->lport->host, + + fnic_cleanup_io_iter, fnic); +++======= ++ unsigned int io_count = 0; ++ unsigned long flags; ++ struct fnic_io_req *io_req = NULL; ++ struct scsi_cmnd *sc = NULL; ++ ++ io_count = fnic_count_all_ioreqs(fnic); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "Outstanding ioreq count: %d active io count: %lld Waiting\n", ++ io_count, ++ atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); ++ ++ scsi_host_busy_iter(fnic->host, ++ fnic_cleanup_io_iter, fnic); ++ ++ /* with sg3utils device reset, SC needs to be retrieved from ioreq */ ++ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); ++ io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id]; ++ if (io_req) { ++ sc = io_req->sc; ++ if (sc) { ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) ++ && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; ++ if (io_req && io_req->dr_done) ++ complete(io_req->dr_done); ++ } ++ } ++ } ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); ++ ++ while ((io_count = fnic_count_all_ioreqs(fnic))) { ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "Outstanding ioreq count: %d active io count: %lld Waiting\n", ++ io_count, ++ atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); ++ ++ schedule_timeout(msecs_to_jiffies(100)); ++ } +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, +@@@ -1480,20 -1688,18 +1950,24 @@@ + + wq_copy_cleanup_scsi_cmd: + sc->result = DID_NO_CONNECT << 16; +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:" +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + " DID_NO_CONNECT\n"); + + - FNIC_TRACE(fnic_wq_copy_cleanup_handler, + - sc->device->host->host_no, id, sc, + - jiffies_to_msecs(jiffies - start_time), + - 0, ((u64)sc->cmnd[0] << 32 | + - (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + if (sc->scsi_done) { + + FNIC_TRACE(fnic_wq_copy_cleanup_handler, + + sc->device->host->host_no, id, sc, + + jiffies_to_msecs(jiffies - start_time), + + 0, ((u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + - scsi_done(sc); + + sc->scsi_done(sc); + + } + } + + static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, +@@@ -1512,17 -1721,18 +1986,22 @@@ + return 1; + } else + 
atomic_inc(&fnic->in_flight); + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, flags); + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + atomic_dec(&fnic->in_flight); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ atomic_dec(&tport->in_flight); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "fnic_queue_abort_io_req: failure: no descriptors\n"); + atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); + return 1; +@@@ -1561,23 -1770,31 +2040,40 @@@ static bool fnic_rport_abort_io_iter(st + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct scsi_lun fc_lun; + enum fnic_ioreq_state old_ioreq_state; + - uint16_t hwq = 0; + - unsigned long flags; + + - abt_tag = blk_mq_unique_tag(rq); + - hwq = blk_mq_unique_tag_to_hwq(abt_tag); + + io_lock = fnic_io_lock_tag(fnic, abt_tag); + + spin_lock_irqsave(io_lock, flags); + +++<<<<<<< HEAD + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ if (!sc) { ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq); ++ return true; ++ } +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - io_req = fnic_priv(sc)->io_req; + if (!io_req || io_req->port_id != iter_data->port_id) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + return true; + } + +++<<<<<<< HEAD + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", + + sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n", ++ hwq, abt_tag, fnic_priv(sc)->flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return true; + } + +@@@ -1585,41 -1802,44 +2081,65 @@@ + * Found IO that is still pending with firmware and + * belongs to rport that went away + */ + - if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); + return true; + } + - + if (io_req->abts_done) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "fnic_rport_exch_reset: io_req->abts_done is set " + + "state is %s\n", + + fnic_ioreq_state_to_str(CMD_STATE(sc))); + + } + + + + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "rport_exch_reset " + + "IO not yet issued %p tag 0x%x flags " + + "%x state %d\n", + + sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc)); +++======= ++ shost_printk(KERN_ERR, fnic->host, ++ "fnic_rport_exch_reset: 
io_req->abts_done is set state is %s\n", ++ fnic_ioreq_state_to_str(fnic_priv(sc)->state)); ++ } ++ ++ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { ++ shost_printk(KERN_ERR, fnic->host, ++ "rport_exch_reset IO not yet issued %p abt_tag 0x%x", ++ sc, abt_tag); ++ shost_printk(KERN_ERR, fnic->host, ++ "flags %x state %d\n", fnic_priv(sc)->flags, ++ fnic_priv(sc)->state); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + - old_ioreq_state = fnic_priv(sc)->state; + - fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + - fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + - + - if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { + + old_ioreq_state = CMD_STATE(sc); + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + atomic64_inc(&reset_stats->device_reset_terminates); + abt_tag |= FNIC_TAG_DEV_RST; ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "dev reset sc 0x%p\n", sc); + } +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); + + BUG_ON(io_req->abts_done); + + + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc); ++ WARN_ON_ONCE(io_req->abts_done); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "fnic_rport_reset_exch: Issuing abts\n"); + + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + - /* Queue the abort command to firmware */ + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, abt_tag, +@@@ -1631,17 -1851,20 +2151,27 @@@ + * aborted later by scsi_eh, or cleaned up during + * lun reset + */ +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + + CMD_STATE(sc) = old_ioreq_state; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n", ++ hwq, abt_tag, fnic_priv(sc)->flags); ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) ++ fnic_priv(sc)->state = old_ioreq_state; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } else { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + - fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + else + - fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&term_stats->terminates); + iter_data->term_cnt++; + } +@@@ -1657,54 -1883,115 +2187,113 @@@ static void fnic_rport_exch_reset(struc + .term_cnt = 0, + }; + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "fnic_rport_exch_reset called portid 0x%06x\n", + + port_id); +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "fnic rport exchange reset for tport: 0x%06x\n", ++ port_id); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + if (fnic->in_remove) 
+ return; + +++<<<<<<< HEAD + + scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, +++======= ++ io_count = fnic_count_ioreqs(fnic, port_id); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n", ++ port_id, io_count, ++ atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ /* Bump in_flight counter to hold off fnic_fw_reset_handler. */ ++ atomic_inc(&fnic->in_flight); ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { ++ atomic_dec(&fnic->in_flight); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ return; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + &iter_data); + - + if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) + atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); + +++<<<<<<< HEAD +++======= ++ atomic_dec(&fnic->in_flight); ++ ++ while ((io_count = fnic_count_ioreqs(fnic, port_id))) ++ schedule_timeout(msecs_to_jiffies(1000)); ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "rport: 0x%x remaining portid-io-count: %d ", ++ port_id, io_count); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + void fnic_terminate_rport_io(struct fc_rport *rport) + { + - struct fnic_tport_s *tport; + - struct rport_dd_data_s *rdd_data; + - struct fnic_iport_s *iport = NULL; + - struct fnic *fnic = NULL; + + struct fc_rport_libfc_priv *rdata; + + struct fc_lport *lport; + + struct fnic *fnic; + + if (!rport) { + - pr_err("rport is NULL\n"); + + printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); + return; + } + + rdata = rport->dd_data; + + - rdd_data = rport->dd_data; + - if (rdd_data) { + - tport = rdd_data->tport; + - if (!tport) { + - pr_err( + - "term rport io called after tport is deleted. Returning 0x%8x\n", + - rport->port_id); + - } else { + - pr_err( + - "term rport io called after tport is set 0x%8x\n", + - rport->port_id); + - pr_err( + - "tport maybe rediscovered\n"); + - + - iport = (struct fnic_iport_s *) tport->iport; + - fnic = iport->fnic; + - fnic_rport_exch_reset(fnic, rport->port_id); + - } + + if (!rdata) { + + printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); + + return; + } + -} + + lport = rdata->local_port; + + -/* + - * FCP-SCSI specific handling for module unload + - * + - */ + -void fnic_scsi_unload(struct fnic *fnic) + -{ + - unsigned long flags; + + if (!lport) { + + printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); + + return; + + } + + fnic = lport_priv(lport); + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, "fnic_terminate_rport_io called" + + " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", + + rport->port_name, rport->node_name, rport, + + rport->port_id); + + - /* + - * Mark state so that the workqueue thread stops forwarding + - * received frames and link events to the local port. 
ISR and + - * other threads that can queue work items will also stop + - * creating work items on the fnic workqueue + - */ + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->in_remove) + + return; + +++<<<<<<< HEAD + + fnic_rport_exch_reset(fnic, rport->port_id); +++======= ++ if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) ++ fnic_scsi_fcpio_reset(fnic); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ fnic->in_remove = 1; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ fnic_flush_tport_event_list(fnic); ++ fnic_delete_fcp_tports(fnic); ++ } ++ ++ void fnic_scsi_unload_cleanup(struct fnic *fnic) ++ { ++ int hwq = 0; ++ ++ fc_remove_host(fnic->host); ++ scsi_remove_host(fnic->host); ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) ++ kfree(fnic->sw_copy_wq[hwq].io_req_table); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + /* +@@@ -1745,18 -2037,55 +2334,63 @@@ int fnic_abort_cmd(struct scsi_cmnd *sc + term_stats = &fnic->fnic_stats.term_stats; + + rport = starget_to_rport(scsi_target(sc->device)); + - mqtag = blk_mq_unique_tag(rq); + - hwq = blk_mq_unique_tag_to_hwq(mqtag); + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", + + rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); + + - fnic_priv(sc)->flags = FNIC_NO_FLAGS; + + CMD_FLAGS(sc) = FNIC_NO_FLAGS; + +++<<<<<<< HEAD + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) { +++======= ++ rdd_data = rport->dd_data; ++ tport = rdd_data->tport; ++ ++ if (!tport) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "Abort cmd called after tport delete! rport fcid: 0x%x", ++ rport->port_id); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n", ++ sc->device->lun, hwq, mqtag, ++ sc->cmnd[0], fnic_priv(sc)->flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) ++ ret = FAILED; + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_abort_cmd_end; ++ } ++ +++<<<<<<< HEAD +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x", ++ rport->port_id, sc->device->lun, hwq, mqtag); ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Op: 0x%x flags: 0x%x\n", ++ sc->cmnd[0], ++ fnic_priv(sc)->flags); ++ ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "iport NOT in READY state"); + ret = FAILED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } + ++ if ((tport->state != FDLS_TGT_STATE_READY) && ++ (tport->state != FDLS_TGT_STATE_ADISC)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "tport state: %d\n", tport->state); ++ ret = FAILED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_abort_cmd_end; ++ } ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + /* + * Avoid a race between SCSI issuing the abort and the device + * completing the command. 
+@@@ -1800,8 -2129,9 +2434,14 @@@ + else + atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time); +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "CDB Opcode: 0x%02x Abort issued time: %lu msec\n", ++ sc->cmnd[0], abt_issued_time); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + /* + * Command is still pending, need to abort it + * If the firmware completes the command after this point, +@@@ -1888,10 -2218,10 +2528,17 @@@ + + /* IO out of order */ + +++<<<<<<< HEAD + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Issuing Host reset due to out of order IO\n"); +++======= ++ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "Issuing host reset due to out of order IO\n"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + ret = FAILED; + goto fnic_abort_cmd_end; +@@@ -1935,9 -2266,9 +2582,13 @@@ fnic_abort_cmd_end + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "Returning from abort cmd type %x %s\n", task_req, + (ret == SUCCESS) ? + "SUCCESS" : "FAILED"); +@@@ -1953,24 -2283,33 +2604,28 @@@ static inline int fnic_queue_dr_io_req( + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + struct scsi_lun fc_lun; + int ret = 0; + - unsigned long flags; + - uint16_t hwq = 0; + - uint32_t tag = 0; + - struct fnic_tport_s *tport = io_req->tport; + - + - tag = io_req->tag; + - hwq = blk_mq_unique_tag_to_hwq(tag); + - wq = &fnic->hw_copy_wq[hwq]; + + unsigned long intr_flags; + + - spin_lock_irqsave(&fnic->fnic_lock, flags); + + spin_lock_irqsave(host->host_lock, intr_flags); + if (unlikely(fnic_chk_state_flags_locked(fnic, + FNIC_FLAGS_IO_BLOCKED))) { + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, intr_flags); + return FAILED; + - } else { + + } else + atomic_inc(&fnic->in_flight); + - atomic_inc(&tport->in_flight); + - } + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, intr_flags); + + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + + - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + - free_wq_copy_descs(fnic, wq, hwq); + + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) + + free_wq_copy_descs(fnic, wq); + + if (!vnic_wq_copy_desc_avail(wq)) { +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "queue_dr_io_req failure - no descriptors\n"); + atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); + ret = -EAGAIN; +@@@ -2033,28 -2377,27 +2688,40 @@@ static bool fnic_pending_aborts_iter(st + * Found IO that is still pending with firmware and + * belongs to the LUN that we are resetting + */ +++<<<<<<< 
HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "Found IO in %s on lun\n", + - fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + + fnic_ioreq_state_to_str(CMD_STATE(sc))); + + - if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); + return true; + } +++<<<<<<< HEAD + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "%s dev rst not pending sc 0x%p\n", __func__, + + sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "dev rst not pending sc 0x%p\n", sc); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return true; + } + + if (io_req->abts_done) +- shost_printk(KERN_ERR, fnic->lport->host, ++ shost_printk(KERN_ERR, fnic->host, + "%s: io_req->abts_done is set state is %s\n", + - __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + - old_ioreq_state = fnic_priv(sc)->state; + + __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); + + old_ioreq_state = CMD_STATE(sc); + /* + * Any pending IO issued prior to reset is expected to be + * in abts pending state, if not we need to set +@@@ -2066,38 -2409,40 +2733,50 @@@ + + BUG_ON(io_req->abts_done); + +++<<<<<<< HEAD + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + + abt_tag |= FNIC_TAG_DEV_RST; + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "%s: dev rst sc 0x%p\n", __func__, sc); +++======= ++ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "dev rst sc 0x%p\n", sc); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + - fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + io_req->abts_done = &tm_done; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, abt_tag, + FCPIO_ITMF_ABT_TASK_TERM, + - fc_lun.scsi_lun, io_req, hwq)) { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - io_req = fnic_priv(sc)->io_req; + + fc_lun.scsi_lun, io_req)) { + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (io_req) + io_req->abts_done = NULL; + - if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + - fnic_priv(sc)->state = old_ioreq_state; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + + CMD_STATE(sc) = old_ioreq_state; + + spin_unlock_irqrestore(io_lock, flags); + iter_data->ret = FAILED; +++<<<<<<< HEAD +++======= ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "hwq: %d abt_tag: 0x%lx Abort could not be queued\n", ++ hwq, abt_tag); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return false; + } else { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + - fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + 
spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); + } + - fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; + + wait_for_completion_timeout(&tm_done, msecs_to_jiffies + (fnic->config.ed_tov)); +@@@ -2163,10 -2509,9 +2842,10 @@@ static int fnic_clean_pending_aborts(st + .ret = SUCCESS, + }; + + - iter_data.lr_sc = lr_sc; + + if (new_sc) + + iter_data.lr_sc = lr_sc; + +- scsi_host_busy_iter(fnic->lport->host, ++ scsi_host_busy_iter(fnic->host, + fnic_pending_aborts_iter, &iter_data); + if (iter_data.ret == FAILED) { + ret = iter_data.ret; +@@@ -2179,8 -2524,8 +2858,13 @@@ + ret = 1; + + clean_pending_aborts_end: +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "%s: exit status: %d\n", __func__, ret); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "exit status: %d\n", ret); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return ret; + } + +@@@ -2222,12 -2572,39 +2906,47 @@@ int fnic_device_reset(struct scsi_cmnd + atomic64_inc(&reset_stats->device_resets); + + rport = starget_to_rport(scsi_target(sc->device)); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n", + + rport->port_id, sc->device->lun, sc); + +++<<<<<<< HEAD + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + + goto fnic_device_reset_end; +++======= ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n", ++ rport->port_id, sc->device->lun, hwq, mqtag, ++ fnic_priv(sc)->flags); ++ ++ rdd_data = rport->dd_data; ++ tport = rdd_data->tport; ++ if (!tport) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "Dev rst called after tport delete! 
rport fcid: 0x%x lun: %llu\n", ++ rport->port_id, sc->device->lun); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_device_reset_end; ++ } ++ ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "iport NOT in READY state"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_device_reset_end; ++ } ++ ++ if ((tport->state != FDLS_TGT_STATE_READY) && ++ (tport->state != FDLS_TGT_STATE_ADISC)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "tport state: %d\n", tport->state); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_device_reset_end; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + /* Check if remote port up */ + if (fc_remote_port_chkready(rport)) { +@@@ -2265,14 -2645,24 +2984,18 @@@ + } + memset(io_req, 0, sizeof(*io_req)); + io_req->port_id = rport->port_id; + - io_req->tag = mqtag; + - fnic_priv(sc)->io_req = io_req; + - io_req->tport = tport; + - io_req->sc = sc; + - + - if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) + - WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n", + - fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag)); + - + - fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = + - io_req; + + CMD_SP(sc) = (char *)io_req; + } + io_req->dr_done = &tm_done; + - fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + - fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; + + spin_unlock_irqrestore(io_lock, flags); + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + /* + * issue the device reset, if enqueue failed, clean up the ioreq +@@@ -2296,17 -2691,42 +3019,32 @@@ + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + + - /* + - * Wake up can be due to the following reasons: + - * 1) The device reset completed from target. + - * 2) Device reset timed out. + - * 3) A link-down/host_reset may have happened in between. + - * 4) The device reset was aborted and io_req->dr_done was called. + - */ + - + - exit_dr = 0; + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - if ((old_link_down_cnt != fnic->link_down_cnt) || + - (fnic->reset_in_progress) || + - (fnic->soft_reset_count != old_soft_reset_count) || + - (iport->state != FNIC_IPORT_STATE_READY)) + - exit_dr = 1; + - + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + - + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + - io_req = fnic_priv(sc)->io_req; + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "io_req is null tag 0x%x sc 0x%p\n", tag, sc); + + goto fnic_device_reset_end; + + } +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc); ++ goto fnic_device_reset_end; ++ } ++ ++ if (exit_dr) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "Host reset called for fnic. 
Exit device reset\n"); ++ io_req->dr_done = NULL; ++ goto fnic_device_reset_clean; ++ } +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + io_req->dr_done = NULL; + + - status = fnic_priv(sc)->lr_status; + + status = CMD_LR_STATUS(sc); + + /* + * If lun reset not completed, bail out with failed. io_req +@@@ -2314,64 -2734,22 +3052,72 @@@ + */ + if (status == FCPIO_INVALID_CODE) { + atomic64_inc(&reset_stats->device_reset_timeouts); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "Device reset timed out\n"); + - fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; + + spin_unlock_irqrestore(io_lock, flags); + int_to_scsilun(sc->device->lun, &fc_lun); + - goto fnic_device_reset_clean; + + /* + + * Issue abort and terminate on device reset request. + + * If q'ing of terminate fails, retry it after a delay. + + */ + + while (1) { + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) { + + spin_unlock_irqrestore(io_lock, flags); + + break; + + } + + spin_unlock_irqrestore(io_lock, flags); + + if (fnic_queue_abort_io_req(fnic, + + tag | FNIC_TAG_DEV_RST, + + FCPIO_ITMF_ABT_TASK_TERM, + + fc_lun.scsi_lun, io_req)) { + + wait_for_completion_timeout(&tm_done, + + msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); + + } else { + + spin_lock_irqsave(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + + io_req->abts_done = &tm_done; + + spin_unlock_irqrestore(io_lock, flags); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Abort and terminate issued on Device reset " + + "tag 0x%x sc 0x%p\n", tag, sc); + + break; + + } + + } + + while (1) { + + spin_lock_irqsave(io_lock, flags); + + if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { + + spin_unlock_irqrestore(io_lock, flags); + + wait_for_completion_timeout(&tm_done, + + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + + break; + + } else { + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + io_req->abts_done = NULL; + + goto fnic_device_reset_clean; + + } + + } + } else { + - spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_unlock_irqrestore(io_lock, flags); + } + + /* Completed, but not successful, clean up the io_req, return fail */ + if (status != FCPIO_SUCCESS) { + - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, +++<<<<<<< HEAD + + fnic->lport->host, +++======= ++ fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "Device reset completed - failed\n"); + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + goto fnic_device_reset_clean; + } + +@@@ -2383,11 -2761,10 +3129,18 @@@ + * succeeds + */ + if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Device reset failed" + + " since could not abort all IOs\n"); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, ++ "Device reset failed: Cannot abort all IOs\n"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + goto fnic_device_reset_clean; + } + +@@@ -2416,13 -2805,25 +3169,28 @@@ fnic_device_reset_end + 0, ((u64)sc->cmnd[0] << 32 | 
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + - if (new_sc) { + - fnic->sgreset_sc = NULL; + - mutex_unlock(&fnic->sgreset_mutex); + - } + + /* free tag if it is allocated */ + + if (unlikely(tag_gen_flag)) + + fnic_scsi_host_end_tag(fnic, sc); + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +++======= ++ while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) { ++ if (count >= 2) { ++ ret = FAILED; ++ break; ++ } ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "Cannot clean up all IOs for the LUN\n"); ++ schedule_timeout(msecs_to_jiffies(1000)); ++ count++; ++ } ++ ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + "Returning from device reset %s\n", + (ret == SUCCESS) ? + "SUCCESS" : "FAILED"); +@@@ -2433,68 -2834,78 +3201,99 @@@ + return ret; + } + + -static void fnic_post_flogo_linkflap(struct fnic *fnic) + -{ + - unsigned long flags; + - + - fnic_fdls_link_status_change(fnic, 0); + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - + - if (fnic->link_status) { + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + - fnic_fdls_link_status_change(fnic, 1); + - return; + - } + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + -} + - + -/* Logout from all the targets and simulate link flap */ + -void fnic_reset(struct Scsi_Host *shost) + +/* Clean up all IOs, clean up libFC local port */ + +int fnic_reset(struct Scsi_Host *shost) + { + + struct fc_lport *lp; + struct fnic *fnic; + + int ret = 0; + struct reset_stats *reset_stats; + + - fnic = *((struct fnic **) shost_priv(shost)); + + lp = shost_priv(shost); + + fnic = lport_priv(lp); + reset_stats = &fnic->fnic_stats.reset_stats; + +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_reset called\n"); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Issuing fnic reset\n"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + atomic64_inc(&reset_stats->fnic_resets); + - fnic_post_flogo_linkflap(fnic); + +++<<<<<<< HEAD + + /* + + * Reset local port, this will clean up libFC exchanges, + + * reset remote port sessions, and if link is up, begin flogi + + */ + + ret = fc_lport_reset(lp); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Returning from fnic reset"); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + + - atomic64_inc(&reset_stats->fnic_reset_completions); + -} + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Returning from fnic reset %s\n", + + (ret == 0) ? + + "SUCCESS" : "FAILED"); + + -int fnic_issue_fc_host_lip(struct Scsi_Host *shost) + -{ + - int ret = 0; + - struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + if (ret == 0) + + atomic64_inc(&reset_stats->fnic_reset_completions); + + else + + atomic64_inc(&reset_stats->fnic_reset_failures); + +++<<<<<<< HEAD +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "FC host lip issued"); ++ ++ ret = fnic_host_reset(shost); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + return ret; + } + + -int fnic_host_reset(struct Scsi_Host *shost) + +/* + + * SCSI Error handling calls driver's eh_host_reset if all prior + + * error handling levels return FAILED. If host reset completes + + * successfully, and if link is up, then Fabric login begins. + + * + + * Host Reset is the highest level of error recovery. 
If this fails, then + + * host is offlined by SCSI. + + * + + */ + +int fnic_host_reset(struct scsi_cmnd *sc) + { + - int ret = SUCCESS; + + int ret; + unsigned long wait_host_tmo; + - struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + struct Scsi_Host *shost = sc->device->host; + + struct fc_lport *lp = shost_priv(shost); + + struct fnic *fnic = lport_priv(lp); + unsigned long flags; + - struct fnic_iport_s *iport = &fnic->iport; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + - if (fnic->reset_in_progress == NOT_IN_PROGRESS) { + - fnic->reset_in_progress = IN_PROGRESS; + + if (!fnic->internal_reset_inprogress) { + + fnic->internal_reset_inprogress = true; + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "host reset in progress skipping another host reset\n"); + + return SUCCESS; +++======= ++ wait_for_completion_timeout(&fnic->reset_completion_wait, ++ msecs_to_jiffies(10000)); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (fnic->reset_in_progress == IN_PROGRESS) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, ++ "Firmware reset in progress. Skipping another host reset\n"); ++ return SUCCESS; ++ } ++ fnic->reset_in_progress = IN_PROGRESS; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +@@@ -2513,130 -2935,13 +3312,136 @@@ + ret = SUCCESS; + break; + } + + ssleep(1); + } + } + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + fnic->internal_reset_inprogress = false; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return ret; + +} + + + +/* + + * This fxn is called from libFC when host is removed + + */ + +void fnic_scsi_abort_io(struct fc_lport *lp) + +{ + + int err = 0; + + unsigned long flags; + + enum fnic_state old_state; + + struct fnic *fnic = lport_priv(lp); + + DECLARE_COMPLETION_ONSTACK(remove_wait); + + + /* Issue firmware reset for fnic, wait for reset to complete */ + +retry_fw_reset: + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) && + + fnic->link_events) { + + /* fw reset is in progress, poll for its completion */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_timeout(msecs_to_jiffies(100)); + + goto retry_fw_reset; + + } + + + + fnic->remove_wait = &remove_wait; + + old_state = fnic->state; + + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + +++<<<<<<< HEAD + + err = fnic_fw_reset_handler(fnic); + + if (err) { + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + + fnic->state = old_state; + + fnic->remove_wait = NULL; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + /* Wait for firmware reset to complete */ + + wait_for_completion_timeout(&remove_wait, + + msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + fnic->remove_wait = NULL; + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic_scsi_abort_io %s\n", + + (fnic->state == FNIC_IN_ETH_MODE) ? 
+ + "SUCCESS" : "FAILED"); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + +} + + + +/* + + * This fxn called from libFC to clean up driver IO state on link down + + */ + +void fnic_scsi_cleanup(struct fc_lport *lp) + +{ + + unsigned long flags; + + enum fnic_state old_state; + + struct fnic *fnic = lport_priv(lp); + + + + /* issue fw reset */ + +retry_fw_reset: + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { + + /* fw reset is in progress, poll for its completion */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_timeout(msecs_to_jiffies(100)); + + goto retry_fw_reset; + + } + + old_state = fnic->state; + + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + if (fnic_fw_reset_handler(fnic)) { + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + + fnic->state = old_state; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + } + + + +} + + + +void fnic_empty_scsi_cleanup(struct fc_lport *lp) + +{ + +} + + + +void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) + +{ + + struct fnic *fnic = lport_priv(lp); + + + + /* Non-zero sid, nothing to do */ + + if (sid) + + goto call_fc_exch_mgr_reset; + + + + if (did) { + + fnic_rport_exch_reset(fnic, did); + + goto call_fc_exch_mgr_reset; + + } + + + + /* + + * sid = 0, did = 0 + + * link down or device being removed + + */ + + if (!fnic->in_remove) + + fnic_scsi_cleanup(lp); + + else + + fnic_scsi_abort_io(lp); + + + + /* call libFC exch mgr reset to reset its exchanges */ + +call_fc_exch_mgr_reset: + + fc_exch_mgr_reset(lp, sid, did); + + +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "host reset return status: %d\n", ret); ++ return ret; +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + } + + static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) +@@@ -2670,11 -2979,12 +3475,20 @@@ + * Found IO that is still pending with firmware and + * belongs to the LUN that we are resetting + */ +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + + "Found IO in %s on lun\n", + + fnic_ioreq_state_to_str(CMD_STATE(sc))); + + cmd_state = CMD_STATE(sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "hwq: %d tag: 0x%x Found IO in state: %s on lun\n", ++ hwq, tag, ++ fnic_ioreq_state_to_str(fnic_priv(sc)->state)); ++ cmd_state = fnic_priv(sc)->state; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) + if (cmd_state == FNIC_IOREQ_ABTS_PENDING) + iter_data->ret = 1; + +@@@ -2707,3 -3017,76 +3521,79 @@@ int fnic_is_abts_pending(struct fnic *f + + return iter_data.ret; + } +++<<<<<<< HEAD +++======= ++ ++ /* ++ * SCSI Error handling calls driver's eh_host_reset if all prior ++ * error handling levels return FAILED. If host reset completes ++ * successfully, and if link is up, then Fabric login begins. ++ * ++ * Host Reset is the highest level of error recovery. If this fails, then ++ * host is offlined by SCSI. 
++ * ++ */ ++ int fnic_eh_host_reset_handler(struct scsi_cmnd *sc) ++ { ++ int ret = 0; ++ struct Scsi_Host *shost = sc->device->host; ++ struct fnic *fnic = *((struct fnic **) shost_priv(shost)); ++ ++ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, ++ "SCSI error handling: fnic host reset"); ++ ++ ret = fnic_host_reset(shost); ++ return ret; ++ } ++ ++ ++ void fnic_scsi_fcpio_reset(struct fnic *fnic) ++ { ++ unsigned long flags; ++ enum fnic_state old_state; ++ struct fnic_iport_s *iport = &fnic->iport; ++ DECLARE_COMPLETION_ONSTACK(fw_reset_done); ++ int time_remain; ++ ++ /* issue fw reset */ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { ++ /* fw reset is in progress, poll for its completion */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "fnic is in unexpected state: %d for fw_reset\n", ++ fnic->state); ++ return; ++ } ++ ++ old_state = fnic->state; ++ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; ++ ++ fnic_update_mac_locked(fnic, iport->hwmac); ++ fnic->fw_reset_done = &fw_reset_done; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Issuing fw reset\n"); ++ if (fnic_fw_reset_handler(fnic)) { ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) ++ fnic->state = old_state; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } else { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Waiting for fw completion\n"); ++ time_remain = wait_for_completion_timeout(&fw_reset_done, ++ msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT)); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "Woken up after fw completion timeout\n"); ++ if (time_remain == 0) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, ++ "FW reset completion timed out after %d ms)\n", ++ FNIC_FW_RESET_TIMEOUT); ++ } ++ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); ++ } ++ fnic->fw_reset_done = NULL; ++ } +++>>>>>>> 7e6886b705fd (scsi: fnic: Code cleanup) +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fip.h +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fip.h +* Unmerged path drivers/scsi/fnic/fnic.h +diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c +index 1a4151ef90c1..a024556836d4 100644 +--- a/drivers/scsi/fnic/fnic_debugfs.c ++++ b/drivers/scsi/fnic/fnic_debugfs.c +@@ -691,7 +691,7 @@ void fnic_stats_debugfs_init(struct fnic *fnic) + { + char name[16]; + +- snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); ++ snprintf(name, sizeof(name), "host%d", fnic->host->host_no); + + fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, + fnic_stats_debugfs_root); +* Unmerged path drivers/scsi/fnic/fnic_fcs.c +diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c +index 8ce488a5e6ee..74e72f196511 100644 +--- a/drivers/scsi/fnic/fnic_isr.c ++++ b/drivers/scsi/fnic/fnic_isr.c +@@ -19,7 +19,7 @@ + #include + #include + #include +-#include ++#include + #include + #include "vnic_dev.h" + #include "vnic_intr.h" +@@ -234,7 +234,7 @@ int fnic_request_intr(struct fnic *fnic) + fnic->msix[i].devname, + fnic->msix[i].devid); + if (err) { +- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + 
"request_irq failed with error: %d\n", + err); + fnic_free_intr(fnic); +@@ -262,10 +262,10 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) + * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs + * (last INTR is used for WQ/RQ errors and notification area) + */ +- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "rq-array size: %d wq-array size: %d copy-wq array size: %d\n", + n, m, o); +- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n", + fnic->rq_count, fnic->raw_wq_count, + fnic->wq_copy_count, fnic->cq_count); +@@ -277,17 +277,17 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) + + vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); +- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "allocated %d MSI-X vectors\n", + vec_count); + + if (vec_count > 0) { + if (vec_count < vecs) { +- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "interrupts number mismatch: vec_count: %d vecs: %d\n", + vec_count, vecs); + if (vec_count < min_irqs) { +- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "no interrupts for copy wq\n"); + return 1; + } +@@ -299,7 +299,7 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) + fnic->wq_copy_count = vec_count - n - m - 1; + fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count; + if (fnic->cq_count != vec_count - 1) { +- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "CQ count: %d does not match MSI-X vector count: %d\n", + fnic->cq_count, vec_count); + fnic->cq_count = vec_count - 1; +@@ -307,23 +307,23 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) + fnic->intr_count = vec_count; + fnic->err_intr_offset = fnic->rq_count + fnic->wq_count; + +- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "rq_count: %d raw_wq_count: %d copy_wq_base: %d\n", + fnic->rq_count, + fnic->raw_wq_count, fnic->copy_wq_base); + +- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "wq_copy_count: %d wq_count: %d cq_count: %d\n", + fnic->wq_copy_count, + fnic->wq_count, fnic->cq_count); + +- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "intr_count: %d err_intr_offset: %u", + fnic->intr_count, + fnic->err_intr_offset); + + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX); +- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic using MSI-X\n"); + return 0; + } +@@ -363,7 +363,7 @@ int fnic_set_intr_mode(struct fnic *fnic) + fnic->intr_count = 1; + fnic->err_intr_offset = 0; + +- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Using MSI Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); + +@@ -389,7 +389,7 @@ int fnic_set_intr_mode(struct fnic *fnic) + fnic->cq_count = 3; + fnic->intr_count = 3; + +- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ FNIC_ISR_DBG(KERN_DEBUG, 
fnic->host, fnic->fnic_num, + "Using Legacy Interrupts\n"); + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); + +* Unmerged path drivers/scsi/fnic/fnic_main.c +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7f5dce6e.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7f5dce6e.failed new file mode 100644 index 0000000000000..e0a04f7eb26ee --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7f5dce6e.failed @@ -0,0 +1,29 @@ +scsi: fnic: Replace fnic->lock_flags with local flags + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 7f5dce6e7f0150ee57b8d1186011f57fa62c2843 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/7f5dce6e.failed + +Replace fnic->lock_flags with local variable for usage with spinlocks in +fdls_schedule_oxid_free_retry_work(). + + Suggested-by: Dan Carpenter +Fixes: a63e78eb2b0f ("scsi: fnic: Add support for fabric based solicited requests and responses") + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250301013712.3115-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 7f5dce6e7f0150ee57b8d1186011f57fa62c2843) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/848d010a.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/848d010a.failed new file mode 100644 index 0000000000000..3a678527ca3b9 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/848d010a.failed @@ -0,0 +1,68 @@ +scsi: fnic: Remove usage of host_lock + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 848d010ab934f1b4326a516396873ddae41db056 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/848d010a.failed + +Remove usage of host_lock. Replace with fnic_lock, where necessary. fnic +does not use host_lock. fnic uses fnic_lock. Use fnic lock to protect fnic +members in fnic_queuecommand. Add log messages in error cases. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20231211173617.932990-10-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 848d010ab934f1b4326a516396873ddae41db056) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic_scsi.c +index a9f65dc3f089,e9acb2e7dd2e..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -490,14 -497,9 +508,20 @@@ static int fnic_queuecommand_lck(struc + + atomic_inc(&fnic->in_flight); + +++<<<<<<< HEAD + + /* + + * Release host lock, use driver resource specific locks from here. + + * Don't re-enable interrupts in case they were disabled prior to the + + * caller disabling them. 
+ + */ + + spin_unlock(lp->host->host_lock); + + CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED; + + CMD_FLAGS(sc) = FNIC_NO_FLAGS; +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; ++ fnic_priv(sc)->flags = FNIC_NO_FLAGS; +++>>>>>>> 848d010ab934 (scsi: fnic: Remove usage of host_lock) + + /* Get a new io_req for this SCSI IO */ + io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); +@@@ -1954,11 -1951,15 +1972,11 @@@ static inline int fnic_queue_dr_io_req( + struct scsi_lun fc_lun; + int ret = 0; + unsigned long intr_flags; + - unsigned int tag = scsi_cmd_to_rq(sc)->tag; + - + - if (tag == SCSI_NO_TAG) + - tag = io_req->tag; + +- spin_lock_irqsave(host->host_lock, intr_flags); ++ spin_lock_irqsave(&fnic->fnic_lock, intr_flags); + if (unlikely(fnic_chk_state_flags_locked(fnic, + FNIC_FLAGS_IO_BLOCKED))) { +- spin_unlock_irqrestore(host->host_lock, intr_flags); ++ spin_unlock_irqrestore(&fnic->fnic_lock, intr_flags); + return FAILED; + } else + atomic_inc(&fnic->in_flight); +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/85d6fbc4.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/85d6fbc4.failed new file mode 100644 index 0000000000000..a2143af3f12b8 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/85d6fbc4.failed @@ -0,0 +1,93 @@ +scsi: fnic: Fix missing DMA mapping error in fnic_send_frame() + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Thomas Fourier +commit 85d6fbc47c3087c5d048e6734926b0c36af34fe9 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/85d6fbc4.failed + +dma_map_XXX() can fail and should be tested for errors with +dma_mapping_error(). + +Fixes: a63e78eb2b0f ("scsi: fnic: Add support for fabric based solicited requests and responses") + Signed-off-by: Thomas Fourier +Link: https://lore.kernel.org/r/20250618065715.14740-2-fourier.thomas@gmail.com + Reviewed-by: Karan Tilak Kumar + Reviewed-by: John Menghini + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 85d6fbc47c3087c5d048e6734926b0c36af34fe9) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_fcs.c +diff --cc drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3,103ab6f1f7cd..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -1086,52 -635,15 +1086,58 @@@ static int fnic_send_frame(struct fnic + int ret = 0; + unsigned long flags; + +++<<<<<<< HEAD + + fh = fc_frame_header_get(fp); + + skb = fp_skb(fp); +++======= ++ pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE); ++ if (dma_mapping_error(&fnic->pdev->dev, pa)) ++ return -ENOMEM; +++>>>>>>> 85d6fbc47c30 (scsi: fnic: Fix missing DMA mapping error in fnic_send_frame()) + + - if ((fnic_fc_trace_set_data(fnic->fnic_num, + - FNIC_FC_SEND | 0x80, (char *) frame, + - frame_len)) != 0) { + - FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + - "fnic ctlr frame trace error"); + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && + + fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) + + return 0; + + + + if (!fnic->vlan_hw_insert) { + + eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); + + vlan_hdr = skb_push(skb, eth_hdr_len); + + eth_hdr = (struct ethhdr *)vlan_hdr; + + vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); + + vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); + + vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + + fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); + + } else { + + eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); + + eth_hdr = skb_push(skb, eth_hdr_len); + + eth_hdr->h_proto = htons(ETH_P_FCOE); + + fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); + + } + + + + if (fnic->ctlr.map_dest) + + fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); + + else + + memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); + + memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); + + + + tot_len = skb->len; + + BUG_ON(tot_len % 4); + + + + memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); + + fcoe_hdr->fcoe_sof = fr_sof(fp); + + if (FC_FCOE_VER) + + FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); + + + + pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); + + if (dma_mapping_error(&fnic->pdev->dev, pa)) { + + ret = -ENOMEM; + + printk(KERN_ERR "DMA map failed with error %d\n", ret); + + goto free_skb_on_err; + + } + + + + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, + + (char *)eth_hdr, tot_len)) != 0) { + + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + + spin_lock_irqsave(&fnic->wq_lock[0], flags); +* Unmerged path drivers/scsi/fnic/fnic_fcs.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/86979346.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/86979346.failed new file mode 100644 index 0000000000000..10ac333e00937 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/86979346.failed @@ -0,0 +1,115 @@ +scsi: fnic: Propagate SCSI error code from fnic_scsi_drv_init() + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Arun Easi +commit 8697934682f1873b7b1cb9cc61b81edf042c9272 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/86979346.failed + +Propagate scsi_add_host() error instead of returning -1. 
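+
+A minimal sketch of the error-propagation pattern this commit adopts (the helper name example_scsi_drv_init() is hypothetical, standing in for the real init path; scsi_add_host() and dev_err() are the actual kernel APIs, and the message string mirrors the one in the hunk below):
+
+static int example_scsi_drv_init(struct fnic *fnic, struct pci_dev *pdev)
+{
+	int err;
+
+	/* Register the SCSI host with the midlayer */
+	err = scsi_add_host(fnic->host, &pdev->dev);
+	if (err) {
+		dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n");
+		return err;	/* hand back the errno from scsi_add_host(), not a hard-coded -1 */
+	}
+	return 0;
+}
+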
+ + Suggested-by: Dan Carpenter + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Signed-off-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250110091956.17749-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 8697934682f1873b7b1cb9cc61b81edf042c9272) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,2f626b860f7a..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -571,23 -607,71 +571,81 @@@ static int fnic_scsi_drv_init(struct fn + fnic->fnic_max_tag_id = host->can_queue; + host->max_lun = fnic->config.luns_per_tgt; + host->max_id = FNIC_MAX_FCP_TARGET; + - host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN; + + host->max_cmd_len = FCOE_MAX_CMD_LEN; + + host->nr_hw_queues = fnic->wq_copy_count; + + if (host->nr_hw_queues > 1) + + shost_printk(KERN_ERR, host, + + "fnic: blk-mq is not supported"); + + + + host->nr_hw_queues = fnic->wq_copy_count = 1; + + - dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", + + shost_printk(KERN_INFO, host, + + "fnic: can_queue: %d max_lun: %llu", + host->can_queue, host->max_lun); + + - dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + + shost_printk(KERN_INFO, host, + + "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + host->max_id, host->max_cmd_len, host->nr_hw_queues); + +++<<<<<<< HEAD +++======= ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { ++ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; ++ fnic->sw_copy_wq[hwq].io_req_table = ++ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * ++ sizeof(struct fnic_io_req *), GFP_KERNEL); ++ ++ if (!fnic->sw_copy_wq[hwq].io_req_table) { ++ fnic_free_ioreq_tables_mq(fnic); ++ return -ENOMEM; ++ } ++ } ++ ++ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", ++ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); ++ ++ fnic_scsi_init(fnic); ++ ++ err = scsi_add_host(fnic->host, &pdev->dev); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n"); ++ return err; ++ } ++ fc_host_maxframe_size(fnic->host) = iport->max_payload_size; ++ fc_host_dev_loss_tmo(fnic->host) = ++ fnic->config.port_down_timeout / 1000; ++ sprintf(fc_host_symbolic_name(fnic->host), ++ DRV_NAME " v" DRV_VERSION " over %s", fnic->name); ++ fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT; ++ fc_host_node_name(fnic->host) = iport->wwnn; ++ fc_host_port_name(fnic->host) = iport->wwpn; ++ fc_host_supported_classes(fnic->host) = FC_COS_CLASS3; ++ memset(fc_host_supported_fc4s(fnic->host), 0, ++ sizeof(fc_host_supported_fc4s(fnic->host))); ++ fc_host_supported_fc4s(fnic->host)[2] = 1; ++ fc_host_supported_fc4s(fnic->host)[7] = 1; ++ fc_host_supported_speeds(fnic->host) = 0; ++ fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT; ++ ++ dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data); ++ if (fnic->host->shost_data != NULL) { ++ if (fnic_tgt_id_binding == 0) { ++ dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n"); ++ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE; ++ } else { ++ dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n"); ++ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN; ++ } ++ } ++ ++ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); ++ if (!fnic->io_req_pool) { ++ 
scsi_remove_host(fnic->host); ++ return -ENOMEM; ++ } ++ +++>>>>>>> 8697934682f1 (scsi: fnic: Propagate SCSI error code from fnic_scsi_drv_init()) + return 0; + } + +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8ccc5947.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8ccc5947.failed new file mode 100644 index 0000000000000..1174a54826975 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8ccc5947.failed @@ -0,0 +1,29 @@ +scsi: fnic: Fix use of uninitialized value in debug message + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Dheeraj Reddy Jonnalagadda +commit 8ccc5947f5d1608f7217cdbee532c7fc2431f7c9 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8ccc5947.failed + +The oxid variable in fdls_process_abts_req() was only being initialized +inside the if (tport) block, but was being used in a debug print statement +after that block. If tport was NULL, oxid would remain uninitialized. Move +the oxid initialization to happen at declaration using +FNIC_STD_GET_OX_ID(fchdr). + +Fixes: f828af44b8dd ("scsi: fnic: Add support for unsolicited requests and responses") +Closes: https://scan7.scan.coverity.com/#/project-view/52337/11354?selectedIssue=1602772 + Signed-off-by: Dheeraj Reddy Jonnalagadda +Link: https://lore.kernel.org/r/20250108050916.52721-1-dheeraj.linuxdev@gmail.com + Reviewed-by: Karan Tilak Kumar + Signed-off-by: Martin K. Petersen +(cherry picked from commit 8ccc5947f5d1608f7217cdbee532c7fc2431f7c9) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8d26bfcf.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8d26bfcf.failed new file mode 100644 index 0000000000000..2a4b6441dce2f --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8d26bfcf.failed @@ -0,0 +1,225 @@ +scsi: fnic: Add support to handle port channel RSCN + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 8d26bfcf1d2e829d37ef7f2b506b95e46f25f993 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/8d26bfcf.failed + +Add support to handle port channel RSCN. + +Port channel RSCN is a Cisco vendor specific RSCN event. It is applicable +only to Cisco UCS fabrics. If there's a change in the port channel +configuration, an RCSN is sent to fnic. This is used to serially reset the +scsi initiator fnics so that there's no all paths down scenario. The +affected fnics are added to a list that are reset with a small time gap +between them. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-15-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 8d26bfcf1d2e829d37ef7f2b506b95e46f25f993) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,b9b0a4f0b78c..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -127,9 -205,39 +127,41 @@@ + #define fnic_clear_state_flags(fnicp, st_flags) \ + __fnic_set_state_flags(fnicp, st_flags, 1) + +++<<<<<<< HEAD +++======= ++ enum reset_states { ++ NOT_IN_PROGRESS = 0, ++ IN_PROGRESS, ++ RESET_ERROR ++ }; ++ ++ enum rscn_type { ++ NOT_PC_RSCN = 0, ++ PC_RSCN ++ }; ++ ++ enum pc_rscn_handling_status { ++ PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0, ++ PC_RSCN_HANDLING_IN_PROGRESS ++ }; ++ ++ enum pc_rscn_handling_feature { ++ PC_RSCN_HANDLING_FEATURE_OFF = 0, ++ PC_RSCN_HANDLING_FEATURE_ON ++ }; ++ ++ extern unsigned int fnic_fdmi_support; +++>>>>>>> 8d26bfcf1d2e (scsi: fnic: Add support to handle port channel RSCN) + extern unsigned int fnic_log_level; + extern unsigned int io_completions; + -extern struct workqueue_struct *fnic_event_queue; + ++ extern unsigned int pc_rscn_handling_feature_flag; ++ extern spinlock_t reset_fnic_list_lock; ++ extern struct list_head reset_fnic_list; ++ extern struct workqueue_struct *reset_fnic_work_queue; ++ extern struct work_struct reset_fnic_work; ++ ++ + #define FNIC_MAIN_LOGGING 0x01 + #define FNIC_FCS_LOGGING 0x02 + #define FNIC_SCSI_LOGGING 0x04 +@@@ -298,8 -438,16 +331,21 @@@ struct fnic + struct work_struct link_work; + struct work_struct frame_work; + struct work_struct flush_work; +++<<<<<<< HEAD + + struct sk_buff_head frame_queue; + + struct sk_buff_head tx_queue; +++======= ++ struct list_head frame_queue; ++ struct list_head tx_queue; ++ mempool_t *frame_pool; ++ mempool_t *frame_elem_pool; ++ struct work_struct tport_work; ++ struct list_head tport_event_list; ++ ++ char subsys_desc[14]; ++ int subsys_desc_len; ++ int pc_rscn_handling_status; +++>>>>>>> 8d26bfcf1d2e (scsi: fnic: Add support to handle port channel RSCN) + + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); +@@@ -386,10 -532,10 +432,11 @@@ void fnic_handle_link_event(struct fni + int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); + + void fnic_handle_fip_frame(struct work_struct *work); ++ void fnic_reset_work_handler(struct work_struct *work); + void fnic_handle_fip_event(struct fnic *fnic); + void fnic_fcoe_reset_vlans(struct fnic *fnic); + -extern void fnic_handle_fip_timer(struct timer_list *t); + +void fnic_fcoe_evlist_free(struct fnic *fnic); + +extern void fnic_handle_fip_timer(struct fnic *fnic); + + static inline int + fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,40ed6b2490e2..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -89,13 -93,13 +93,23 @@@ static unsigned int fnic_max_qdepth = F + module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); + +++<<<<<<< HEAD + +static struct libfc_function_template fnic_transport_template = { + + .frame_send = fnic_send, + + .lport_set_port_id = fnic_set_port_id, + + .fcp_abort_io = fnic_empty_scsi_cleanup, + + .fcp_cleanup = fnic_empty_scsi_cleanup, + + .exch_mgr_reset = fnic_exch_mgr_reset + +}; +++======= ++ unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON; ++ 
module_param(pc_rscn_handling_feature_flag, uint, 0644); ++ MODULE_PARM_DESC(pc_rscn_handling_feature_flag, ++ "PCRSCN handling (0 for none. 1 to handle PCRSCN (default))"); ++ ++ struct workqueue_struct *reset_fnic_work_queue; ++ struct workqueue_struct *fnic_fip_queue; +++>>>>>>> 8d26bfcf1d2e (scsi: fnic: Add support to handle port channel RSCN) + + static int fnic_slave_alloc(struct scsi_device *sdev) + { +@@@ -1168,8 -1318,15 +1195,11 @@@ err_pci_register + err_fc_transport: + destroy_workqueue(fnic_fip_queue); + err_create_fip_workq: ++ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) ++ destroy_workqueue(reset_fnic_work_queue); ++ err_create_reset_fnic_workq: + destroy_workqueue(fnic_event_queue); + err_create_fnic_workq: + - kmem_cache_destroy(fdls_frame_elem_cache); + -err_create_fdls_frame_cache_elem: + - kmem_cache_destroy(fdls_frame_cache); + -err_create_fdls_frame_cache: + kmem_cache_destroy(fnic_io_req_cache); + err_create_fnic_ioreq_slab: + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); +@@@ -1186,8 -1343,14 +1216,17 @@@ static void __exit fnic_cleanup_module( + { + pci_unregister_driver(&fnic_driver); + destroy_workqueue(fnic_event_queue); +++<<<<<<< HEAD + + if (fnic_fip_queue) +++======= ++ ++ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) ++ destroy_workqueue(reset_fnic_work_queue); ++ ++ if (fnic_fip_queue) { ++ flush_workqueue(fnic_fip_queue); +++>>>>>>> 8d26bfcf1d2e (scsi: fnic: Add support to handle port channel RSCN) + destroy_workqueue(fnic_fip_queue); + - } + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + kmem_cache_destroy(fnic_io_req_cache); +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fnic.h +diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3..40efed6acd14 100644 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@ -1409,3 +1409,39 @@ void fnic_handle_fip_timer(struct fnic *fnic) + break; + } + } ++ ++void fnic_reset_work_handler(struct work_struct *work) ++{ ++ struct fnic *cur_fnic, *next_fnic; ++ unsigned long reset_fnic_list_lock_flags; ++ int host_reset_ret_code; ++ ++ /* ++ * This is a single thread. It is per fnic module, not per fnic ++ * All the fnics that need to be reset ++ * have been serialized via the reset fnic list. 
++ */ ++ spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags); ++ list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) { ++ list_del(&cur_fnic->links); ++ spin_unlock_irqrestore(&reset_fnic_list_lock, ++ reset_fnic_list_lock_flags); ++ ++ dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n", ++ cur_fnic->fnic_num); ++ host_reset_ret_code = fnic_host_reset(cur_fnic->host); ++ dev_err(&cur_fnic->pdev->dev, ++ "fnic: <%d>: returned from host reset with status: %d\n", ++ cur_fnic->fnic_num, host_reset_ret_code); ++ ++ spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags); ++ cur_fnic->pc_rscn_handling_status = ++ PC_RSCN_HANDLING_NOT_IN_PROGRESS; ++ spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags); ++ ++ spin_lock_irqsave(&reset_fnic_list_lock, ++ reset_fnic_list_lock_flags); ++ } ++ spin_unlock_irqrestore(&reset_fnic_list_lock, ++ reset_fnic_list_lock_flags); ++} +* Unmerged path drivers/scsi/fnic/fnic_main.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9243626c.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9243626c.failed new file mode 100644 index 0000000000000..8928d9f6fd4cf --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9243626c.failed @@ -0,0 +1,1622 @@ +scsi: fnic: Modify fnic interfaces to use FDLS + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 9243626c211e4d6f5add84c5a7b141e94a2e7222 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9243626c.failed + +Modify fnic driver interfaces to use FDLS and supporting functions. + +Refactor code in fnic_probe and fnic_remove. + +Get fnic from shost_priv. + +Add error handling in stats processing functions. + +Modify some print statements. + +Add support to do module unload cleanup. + +Use placeholder functions/modify function declarations to not break +compilation. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-12-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 9243626c211e4d6f5add84c5a7b141e94a2e7222) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_fcs.c +# drivers/scsi/fnic/fnic_main.c +# drivers/scsi/fnic/fnic_res.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,19e8775f1bfc..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -88,16 -82,102 +88,92 @@@ + #define FNIC_DEV_RST_TERM_DONE BIT(20) + #define FNIC_DEV_RST_ABTS_PENDING BIT(21) + +++<<<<<<< HEAD +++======= ++ #define IS_FNIC_FCP_INITIATOR(fnic) (fnic->role == FNIC_ROLE_FCP_INITIATOR) ++ ++ #define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */ ++ #define FNIC_FCOE_MAX_CMD_LEN 16 ++ /* Retry supported by rport (returned by PRLI service parameters) */ ++ #define FNIC_FC_RP_FLAGS_RETRY 0x1 ++ ++ /* Cisco vendor id */ ++ #define PCI_VENDOR_ID_CISCO 0x1137 ++ #define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */ ++ ++ /* sereno pcie switch */ ++ #define PCI_DEVICE_ID_CISCO_SERENO 0x004e ++ #define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */ ++ #define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */ ++ #define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */ ++ ++ /* Sereno */ ++ #define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */ ++ #define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */ ++ #define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */ ++ ++ /* Cruz */ ++ #define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */ ++ /* Cruz MountTian SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b ++ #define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */ ++ /* Cruz MountTian2 SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157 ++ #define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */ ++ ++ /* Bodega */ ++ /* VIC 1457 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218 ++ #define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */ ++ /* VIC 1487 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a ++ #define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */ ++ /* VIC 1440 Mezz mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215 ++ #define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */ ++ ++ /* Beverly */ ++ #define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */ ++ #define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */ ++ #define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */ ++ #define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */ ++ #define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */ ++ #define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* 
VIC 15427 */ ++ #define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */ ++ #define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */ ++ #define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */ ++ ++ struct fnic_pcie_device { ++ u32 device; ++ u8 *desc; ++ u32 subsystem_device; ++ u8 *subsys_desc; ++ }; ++ +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + /* + - * fnic private data per SCSI command. + + * Usage of the scsi_cmnd scratchpad. + * These fields are locked by the hashed io_req_lock. + */ + -struct fnic_cmd_priv { + - struct fnic_io_req *io_req; + - enum fnic_ioreq_state state; + - u32 flags; + - u16 abts_status; + - u16 lr_status; + -}; + - + -static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) + -{ + - return scsi_cmd_priv(cmd); + -} + - + -static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) + -{ + - struct fnic_cmd_priv *fcmd = fnic_priv(cmd); + - + - return ((u64)fcmd->flags << 32) | fcmd->state; + -} + +#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) + +#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) + +#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) + +#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) + +#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) + +#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) + + #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +@@@ -229,6 -343,9 +305,12 @@@ struct fnic_cpy_wq + /* Per-instance private data structure */ + struct fnic { + int fnic_num; +++<<<<<<< HEAD +++======= ++ enum fnic_role_e role; ++ struct fnic_iport_s iport; ++ struct Scsi_Host *host; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + struct fc_lport *lport; + struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ + struct vnic_dev_bar bar0; +@@@ -333,14 -466,9 +415,9 @@@ + ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; + }; + +- static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) +- { +- return container_of(fip, struct fnic, ctlr); +- } +- + extern struct workqueue_struct *fnic_event_queue; + extern struct workqueue_struct *fnic_fip_queue; + -extern const struct attribute_group *fnic_host_groups[]; + +extern struct device_attribute *fnic_attrs[]; + + void fnic_clear_intr_mode(struct fnic *fnic); + int fnic_set_intr_mode(struct fnic *fnic); +@@@ -365,12 -493,12 +442,21 @@@ void fnic_update_mac_locked(struct fni + int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); + int fnic_abort_cmd(struct scsi_cmnd *); + int fnic_device_reset(struct scsi_cmnd *); +++<<<<<<< HEAD + +int fnic_host_reset(struct scsi_cmnd *); + +int fnic_reset(struct Scsi_Host *); + +void fnic_scsi_cleanup(struct fc_lport *); + +void fnic_scsi_abort_io(struct fc_lport *); + +void fnic_empty_scsi_cleanup(struct fc_lport *); + +void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); +++======= ++ int fnic_eh_host_reset_handler(struct scsi_cmnd *sc); ++ int fnic_host_reset(struct Scsi_Host *shost); ++ void fnic_reset(struct Scsi_Host *shost); ++ int fnic_issue_fc_host_lip(struct Scsi_Host *shost); ++ void fnic_get_host_port_state(struct Scsi_Host *shost); ++ void fnic_scsi_fcpio_reset(struct fnic *fnic); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index); + int fnic_wq_cmpl_handler(struct fnic *fnic, int); + int fnic_flogi_reg_handler(struct fnic *fnic, u32); +@@@ -379,10 -507,11 +465,15 @@@ void fnic_wq_copy_cleanup_handler(struc + int 
fnic_fw_reset_handler(struct fnic *fnic); + void fnic_terminate_rport_io(struct fc_rport *); + const char *fnic_state_to_str(unsigned int state); + -void fnic_mq_map_queues_cpus(struct Scsi_Host *host); + + + void fnic_log_q_error(struct fnic *fnic); + void fnic_handle_link_event(struct fnic *fnic); +++<<<<<<< HEAD + + +++======= ++ int fnic_stats_debugfs_init(struct fnic *fnic); ++ void fnic_stats_debugfs_remove(struct fnic *fnic); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); + + void fnic_handle_fip_frame(struct work_struct *work); +@@@ -398,4 -526,90 +489,93 @@@ fnic_chk_state_flags_locked(struct fni + } + void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); + void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +++<<<<<<< HEAD +++======= ++ void fnic_free_txq(struct list_head *head); ++ int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, ++ char **subsys_desc); ++ void fnic_fdls_link_status_change(struct fnic *fnic, int linkup); ++ void fnic_delete_fcp_tports(struct fnic *fnic); ++ void fnic_flush_tport_event_list(struct fnic *fnic); ++ int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid); ++ unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid); ++ unsigned int fnic_count_all_ioreqs(struct fnic *fnic); ++ unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq, ++ struct scsi_device *device); ++ unsigned int fnic_count_lun_ioreqs(struct fnic *fnic, ++ struct scsi_device *device); ++ void fnic_scsi_unload(struct fnic *fnic); ++ void fnic_scsi_unload_cleanup(struct fnic *fnic); ++ int fnic_get_debug_info(struct stats_debug_info *info, ++ struct fnic *fnic); ++ ++ struct fnic_scsi_iter_data { ++ struct fnic *fnic; ++ void *data1; ++ void *data2; ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2); ++ }; ++ ++ static inline bool ++ fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) ++ { ++ struct fnic_scsi_iter_data *iter = iter_data; ++ ++ return iter->fn(iter->fnic, sc, iter->data1, iter->data2); ++ } ++ ++ static inline void ++ fnic_scsi_io_iter(struct fnic *fnic, ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2), ++ void *data1, void *data2) ++ { ++ struct fnic_scsi_iter_data iter_data = { ++ .fn = fn, ++ .fnic = fnic, ++ .data1 = data1, ++ .data2 = data2, ++ }; ++ scsi_host_busy_iter(fnic->lport->host, fnic_io_iter_handler, &iter_data); ++ } ++ ++ #ifdef FNIC_DEBUG ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) ++ { ++ int i; ++ ++ for (i = 0; i < len; i = i+8) { ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, ++ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], ++ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); ++ } ++ } ++ ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ int len, char *pfx) ++ { ++ uint32_t s_id, d_id; ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = ntoh24(fchdr->fh_d_id); ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", ++ pfx, s_id, d_id, fchdr->fh_type, ++ FNIC_STD_GET_OX_ID(fchdr), len); ++ ++ fnic_debug_dump(fnic, (uint8_t *)fchdr, len); ++ ++ } ++ #else /* FNIC_DEBUG */ ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, 
int len) {} ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ uint32_t len, char *pfx) {} ++ #endif /* FNIC_DEBUG */ +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + #endif /* _FNIC_H_ */ +diff --cc drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3,8dba1168b652..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -26,25 -14,152 +26,158 @@@ + #include + #include + #include + +#include + #include + #include + -#include + #include "fnic_io.h" + #include "fnic.h" + -#include "fnic_fdls.h" + -#include "fdls_fc.h" + +#include "fnic_fip.h" + #include "cq_enet_desc.h" + #include "cq_exch_desc.h" + -#include "fip.h" + + -#define MAX_RESET_WAIT_COUNT 64 + - + -extern struct workqueue_struct *fnic_fip_queue; + +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; + +struct workqueue_struct *fnic_fip_queue; + struct workqueue_struct *fnic_event_queue; + +++<<<<<<< HEAD + +static void fnic_set_eth_mode(struct fnic *); + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); +++======= ++ static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; ++ ++ /* ++ * Internal Functions ++ * This function will initialize the src_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, ++ uint8_t *src_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ src_mac[0], src_mac[1], src_mac[2], src_mac[3], ++ src_mac[4], src_mac[5]); ++ ++ memcpy(fnic->iport.fpma, src_mac, 6); ++ } ++ ++ /* ++ * This function will initialize the dst_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic, ++ uint8_t *dst_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], ++ dst_mac[4], dst_mac[5]); ++ ++ memcpy(fnic->iport.fcfmac, dst_mac, 6); ++ } ++ ++ void fnic_get_host_port_state(struct Scsi_Host *shost) ++ { ++ struct fnic *fnic = *((struct fnic **) shost_priv(shost)); ++ struct fnic_iport_s *iport = &fnic->iport; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (!fnic->link_status) ++ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; ++ else if (iport->state == FNIC_IPORT_STATE_READY) ++ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; ++ else ++ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } ++ ++ void fnic_fdls_link_status_change(struct fnic *fnic, int linkup) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "link up: %d, usefip: %d", linkup, iport->usefip); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ if (linkup) { ++ if (iport->usefip) { ++ iport->state = FNIC_IPORT_STATE_FIP; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "link up: %d, usefip: %d", linkup, iport->usefip); ++ fnic_fcoe_send_vlan_req(fnic); ++ } else { ++ iport->state = FNIC_IPORT_STATE_FABRIC_DISC; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, 
fnic->fnic_num, ++ "iport->state: %d", iport->state); ++ fnic_fdls_disc_start(iport); ++ } ++ } else { ++ iport->state = FNIC_IPORT_STATE_LINK_WAIT; ++ if (!is_zero_ether_addr(iport->fpma)) ++ vnic_dev_del_addr(fnic->vdev, iport->fpma); ++ fnic_common_fip_cleanup(fnic); ++ fnic_fdls_link_down(iport); ++ ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ } ++ ++ ++ /* ++ * FPMA can be either taken from ethhdr(dst_mac) or flogi resp ++ * or derive from FC_MAP and FCID combination. While it should be ++ * same, revisit this if there is any possibility of not-correct. ++ */ ++ void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, ++ uint8_t *fcid) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct ethhdr *ethhdr = (struct ethhdr *) rx_frame; ++ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 }; ++ ++ memcpy(&fcmac[3], fcid, 3); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ ethhdr->h_dest[0], ethhdr->h_dest[1], ++ ethhdr->h_dest[2], ethhdr->h_dest[3], ++ ethhdr->h_dest[4], ethhdr->h_dest[5]); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], ++ fcmac[5]); ++ ++ fnic_fdls_set_fcoe_srcmac(fnic, fcmac); ++ fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source); ++ } ++ ++ void fnic_fdls_init(struct fnic *fnic, int usefip) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ /* Initialize iPort structure */ ++ iport->state = FNIC_IPORT_STATE_INIT; ++ iport->fnic = fnic; ++ iport->usefip = usefip; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x", ++ iport->hwmac[0], iport->hwmac[1], iport->hwmac[2], ++ iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]); ++ ++ INIT_LIST_HEAD(&iport->tport_list); ++ INIT_LIST_HEAD(&iport->tport_list_pending_del); ++ ++ fnic_fdls_disc_init(iport); ++ } +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + + void fnic_handle_link(struct work_struct *work) + { +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,44cbb04b2421..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -70,7 -63,15 +70,12 @@@ unsigned int fnic_log_level + module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); + + -unsigned int fnic_fdmi_support = 1; + -module_param(fnic_fdmi_support, int, 0644); + -MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support"); + ++ static unsigned int fnic_tgt_id_binding = 1; ++ module_param(fnic_tgt_id_binding, uint, 0644); ++ MODULE_PARM_DESC(fnic_tgt_id_binding, ++ "Target ID binding (0 for none. 
1 for binding by WWPN (default))"); ++ + unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS; + module_param(io_completions, int, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time"); +@@@ -165,12 -162,12 +170,17 @@@ static struct fc_function_template fnic + .show_starget_port_id = 1, + .show_rport_dev_loss_tmo = 1, + .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, +++<<<<<<< HEAD + + .issue_fc_host_lip = fnic_reset, + + .get_fc_host_stats = fnic_get_stats, +++======= ++ .issue_fc_host_lip = fnic_issue_fc_host_lip, ++ .get_fc_host_stats = NULL, +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + .reset_fc_host_stats = fnic_reset_host_stats, +- .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), ++ .dd_fcrport_size = sizeof(struct rport_dd_data_s), + .terminate_rport_io = fnic_terminate_rport_io, +- .bsg_request = fc_lport_bsg_request, ++ .bsg_request = NULL, + }; + + static void fnic_get_host_speed(struct Scsi_Host *shost) +@@@ -203,42 -231,11 +244,48 @@@ + } + } + ++ /* Placeholder function */ + static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) + { +++<<<<<<< HEAD + + int ret; + + struct fc_lport *lp = shost_priv(host); + + struct fnic *fnic = lport_priv(lp); + + struct fc_host_statistics *stats = &lp->host_stats; + + struct vnic_stats *vs; + + unsigned long flags; + + + + if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) + + return stats; + + fnic->stats_time = jiffies; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + if (ret) { + + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic: Get vnic stats failed" + + " 0x%x", ret); + + return stats; + + } + + vs = fnic->stats; + + stats->tx_frames = vs->tx.tx_unicast_frames_ok; + + stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; + + stats->rx_frames = vs->rx.rx_unicast_frames_ok; + + stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; + + stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; + + stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; + + stats->invalid_crc_count = vs->rx.rx_crc_errors; + + stats->seconds_since_last_reset = + + (jiffies - fnic->stats_reset_time) / HZ; + + stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); + + stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); + + +++======= ++ struct fnic *fnic = *((struct fnic **) shost_priv(host)); ++ struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + return stats; + } + +@@@ -571,50 -558,117 +631,136 @@@ static int fnic_scsi_drv_init(struct fn + fnic->fnic_max_tag_id = host->can_queue; + host->max_lun = fnic->config.luns_per_tgt; + host->max_id = FNIC_MAX_FCP_TARGET; +- host->max_cmd_len = FCOE_MAX_CMD_LEN; ++ host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN; + + host->nr_hw_queues = fnic->wq_copy_count; + + if (host->nr_hw_queues > 1) + + shost_printk(KERN_ERR, host, + + "fnic: blk-mq is not supported"); + + + + host->nr_hw_queues = fnic->wq_copy_count = 1; + + - dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", + + shost_printk(KERN_INFO, host, + + "fnic: can_queue: %d max_lun: %llu", + host->can_queue, host->max_lun); + + - dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + + shost_printk(KERN_INFO, host, + + "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", 
+ host->max_id, host->max_cmd_len, host->nr_hw_queues); + ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { ++ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; ++ fnic->sw_copy_wq[hwq].io_req_table = ++ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * ++ sizeof(struct fnic_io_req *), GFP_KERNEL); ++ } ++ ++ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", ++ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); ++ ++ fnic_scsi_init(fnic); ++ ++ err = scsi_add_host(fnic->lport->host, &pdev->dev); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n"); ++ return -1; ++ } ++ fc_host_maxframe_size(fnic->lport->host) = iport->max_payload_size; ++ fc_host_dev_loss_tmo(fnic->lport->host) = ++ fnic->config.port_down_timeout / 1000; ++ sprintf(fc_host_symbolic_name(fnic->lport->host), ++ DRV_NAME " v" DRV_VERSION " over %s", fnic->name); ++ fc_host_port_type(fnic->lport->host) = FC_PORTTYPE_NPORT; ++ fc_host_node_name(fnic->lport->host) = iport->wwnn; ++ fc_host_port_name(fnic->lport->host) = iport->wwpn; ++ fc_host_supported_classes(fnic->lport->host) = FC_COS_CLASS3; ++ memset(fc_host_supported_fc4s(fnic->lport->host), 0, ++ sizeof(fc_host_supported_fc4s(fnic->lport->host))); ++ fc_host_supported_fc4s(fnic->lport->host)[2] = 1; ++ fc_host_supported_fc4s(fnic->lport->host)[7] = 1; ++ fc_host_supported_speeds(fnic->lport->host) = 0; ++ fc_host_supported_speeds(fnic->lport->host) |= FC_PORTSPEED_8GBIT; ++ ++ dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->lport->host->shost_data); ++ if (fnic->lport->host->shost_data != NULL) { ++ if (fnic_tgt_id_binding == 0) { ++ dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n"); ++ fc_host_tgtid_bind_type(fnic->lport->host) = FC_TGTID_BIND_NONE; ++ } else { ++ dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n"); ++ fc_host_tgtid_bind_type(fnic->lport->host) = FC_TGTID_BIND_BY_WWPN; ++ } ++ } ++ ++ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); ++ if (!fnic->io_req_pool) { ++ scsi_remove_host(fnic->lport->host); ++ return -ENOMEM; ++ } ++ + return 0; + } + +++<<<<<<< HEAD +++======= ++ void fnic_mq_map_queues_cpus(struct Scsi_Host *host) ++ { ++ struct fnic *fnic = *((struct fnic **) shost_priv(host)); ++ struct pci_dev *l_pdev = fnic->pdev; ++ int intr_mode = fnic->config.intr_mode; ++ struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT]; ++ ++ if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) { ++ FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "intr_mode is not msix\n"); ++ return; ++ } ++ ++ FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "qmap->nr_queues: %d\n", qmap->nr_queues); ++ ++ if (l_pdev == NULL) { ++ FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "l_pdev is null\n"); ++ return; ++ } ++ ++ blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET); ++ } ++ +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { +- struct Scsi_Host *host; +- struct fc_lport *lp; ++ struct Scsi_Host *host = NULL; + struct fnic *fnic; + mempool_t *pool; ++ struct fnic_iport_s *iport; + int err = 0; + int fnic_id = 0; + int i; + unsigned long flags; +++<<<<<<< HEAD +++======= ++ char *desc, *subsys_desc; ++ int len; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + + /* +- * Allocate SCSI Host and set up association between 
host, +- * local port, and fnic ++ * Allocate fnic + */ +++<<<<<<< HEAD + + lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); + + if (!lp) { + + printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); +++======= ++ fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL); ++ if (!fnic) { +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + err = -ENOMEM; +- goto err_out; ++ goto err_out_fnic_alloc; + } + +- host = lp->host; +- fnic = lport_priv(lp); ++ iport = &fnic->iport; + + fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL); + if (fnic_id < 0) { +@@@ -622,35 -676,37 +768,49 @@@ + err = fnic_id; + goto err_out_ida_alloc; + } +++<<<<<<< HEAD + + fnic->lport = lp; + + fnic->ctlr.lp = lp; + + + + fnic->link_events = 0; + + + + snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, + + host->host_no); + + + + host->transportt = fnic_fc_transport; +++======= ++ ++ fnic->pdev = pdev; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + fnic->fnic_num = fnic_id; +- fnic_stats_debugfs_init(fnic); + + - /* Find model name from PCIe subsys ID */ + - if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) { + - dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc); + - + - /* Update FDMI model */ + - fnic->subsys_desc_len = strlen(subsys_desc); + - len = ARRAY_SIZE(fnic->subsys_desc); + - if (fnic->subsys_desc_len > len) + - fnic->subsys_desc_len = len; + - memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len); + - dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc); + - } else { + - fnic->subsys_desc_len = 0; + - dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown", + - pdev->subsystem_device); + - } + + /* Setup PCI resources */ + + pci_set_drvdata(pdev, fnic); + + + + fnic->pdev = pdev; + + err = pci_enable_device(pdev); + if (err) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "Cannot enable PCI device, aborting.\n"); + + goto err_out_free_hba; +++======= ++ dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n"); ++ goto err_out_pci_enable_device; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "Cannot enable PCI resources, aborting\n"); + + goto err_out_disable_device; +++======= ++ dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n"); ++ goto err_out_pci_request_regions; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + } + + pci_set_master(pdev); +@@@ -663,19 -719,17 +823,19 @@@ + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + - dev_err(&fnic->pdev->dev, "No usable DMA configuration " + + shost_printk(KERN_ERR, fnic->lport->host, + + "No usable DMA configuration " + "aborting\n"); +- goto err_out_release_regions; ++ goto err_out_set_dma_mask; + } + } + + /* Map vNIC resources from BAR0 */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + - dev_err(&fnic->pdev->dev, "BAR0 not memory-map'able, aborting.\n"); + + shost_printk(KERN_ERR, fnic->lport->host, + + "BAR0 not memory-map'able, aborting.\n"); + err = -ENODEV; +- goto err_out_release_regions; ++ goto err_out_map_bar; + } + + fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); +@@@ -683,83 -737,101 +843,130 @@@ + fnic->bar0.len = pci_resource_len(pdev, 0); + + if (!fnic->bar0.vaddr) { + - dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, " + + shost_printk(KERN_ERR, 
fnic->lport->host, + + "Cannot memory-map BAR0 res hdr, " + "aborting.\n"); + err = -ENODEV; +- goto err_out_release_regions; ++ goto err_out_fnic_map_bar; + } + + fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); + if (!fnic->vdev) { + - dev_err(&fnic->pdev->dev, "vNIC registration failed, " + + shost_printk(KERN_ERR, fnic->lport->host, + + "vNIC registration failed, " + "aborting.\n"); + err = -ENODEV; +- goto err_out_iounmap; ++ goto err_out_dev_register; + } + + err = vnic_dev_cmd_init(fnic->vdev); + if (err) { + - dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n", + + shost_printk(KERN_ERR, fnic->lport->host, + + "vnic_dev_cmd_init() returns %d, aborting\n", + err); +- goto err_out_vnic_unregister; ++ goto err_out_dev_cmd_init; + } + + err = fnic_dev_wait(fnic->vdev, vnic_dev_open, + vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST); + if (err) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "vNIC dev open failed, aborting.\n"); + + goto err_out_dev_cmd_deinit; +++======= ++ dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n"); ++ goto err_out_dev_open; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + } + + err = vnic_dev_init(fnic->vdev, 0); + if (err) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "vNIC dev init failed, aborting.\n"); + + goto err_out_dev_close; +++======= ++ dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n"); ++ goto err_out_dev_init; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + } + +- err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); ++ err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac); + if (err) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "vNIC get MAC addr failed \n"); + + goto err_out_dev_close; +++======= ++ dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n"); ++ goto err_out_dev_mac_addr; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + } + /* set data_src for point-to-point mode and to keep it non-zero */ +- memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); ++ memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN); + + /* Get vNIC configuration */ + err = fnic_get_vnic_config(fnic); + if (err) { + - dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, " + + shost_printk(KERN_ERR, fnic->lport->host, + + "Get vNIC configuration failed, " + "aborting.\n"); +- goto err_out_dev_close; ++ goto err_out_fnic_get_config; ++ } ++ ++ switch (fnic->config.flags & 0xff0) { ++ case VFCF_FC_INITIATOR: ++ { ++ host = ++ scsi_host_alloc(&fnic_host_template, ++ sizeof(struct fnic *)); ++ if (!host) { ++ dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n"); ++ err = -ENOMEM; ++ goto err_out_scsi_host_alloc; ++ } ++ *((struct fnic **) shost_priv(host)) = fnic; ++ ++ fnic->lport->host = host; ++ fnic->role = FNIC_ROLE_FCP_INITIATOR; ++ dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n", ++ fnic->fnic_num); ++ } ++ break; ++ default: ++ dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num); ++ err = -EINVAL; ++ goto err_out_fnic_role; + } + + - /* Setup PCI resources */ + - pci_set_drvdata(pdev, fnic); + + fnic_scsi_drv_init(fnic); + + fnic_get_res_counts(fnic); + + err = fnic_set_intr_mode(fnic); + if (err) { + - dev_err(&fnic->pdev->dev, "Failed to set intr mode, " + + shost_printk(KERN_ERR, fnic->lport->host, + + "Failed to set intr mode, " + "aborting.\n"); +- goto err_out_dev_close; ++ goto 
err_out_fnic_set_intr_mode; + } + + err = fnic_alloc_vnic_resources(fnic); + if (err) { + - dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, " + + shost_printk(KERN_ERR, fnic->lport->host, + + "Failed to alloc vNIC resources, " + "aborting.\n"); +++<<<<<<< HEAD + + goto err_out_clear_intr; + + } + + +++======= ++ goto err_out_fnic_alloc_vnic_res; ++ } ++ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", ++ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + + /* initialize all fnic locks */ + spin_lock_init(&fnic->fnic_lock); +@@@ -774,17 -846,9 +981,20 @@@ + fnic->fw_ack_index[i] = -1; + } + +++<<<<<<< HEAD + + for (i = 0; i < FNIC_IO_LOCKS; i++) + + spin_lock_init(&fnic->io_req_lock[i]); + + + + err = -ENOMEM; + + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + + if (!fnic->io_req_pool) + + goto err_out_free_resources; + + +++======= +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + if (!pool) +- goto err_out_free_ioreq_pool; ++ goto err_out_free_resources; + fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; + + pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); +@@@ -806,21 -876,18 +1016,29 @@@ + /* enable directed and multicast */ + vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); + vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); +++<<<<<<< HEAD + + vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + + fnic->set_vlan = fnic_set_vlan; + + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); + + timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0); +++======= ++ vnic_dev_add_addr(fnic->vdev, iport->hwmac); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); + - INIT_LIST_HEAD(&fnic->fip_frame_queue); + - INIT_LIST_HEAD(&fnic->vlan_list); + - timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0); + - timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0); + - timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0); + - timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0); + - fnic->set_vlan = fnic_set_vlan; + + INIT_WORK(&fnic->event_work, fnic_handle_event); + + skb_queue_head_init(&fnic->fip_frame_queue); + + INIT_LIST_HEAD(&fnic->evlist); + + INIT_LIST_HEAD(&fnic->vlans); + } else { +++<<<<<<< HEAD + + shost_printk(KERN_INFO, fnic->lport->host, + + "firmware uses non-FIP mode\n"); + + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); + + fnic->ctlr.state = FIP_ST_NON_FIP; +++======= ++ dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n"); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + } + fnic->state = FNIC_IN_FC_MODE; + +@@@ -833,9 -900,8 +1051,14 @@@ + /* Setup notification buffer area */ + err = fnic_notify_set(fnic); + if (err) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "Failed to alloc notify buffer, aborting.\n"); + + goto err_out_free_max_pool; +++======= ++ dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n"); ++ goto err_out_fnic_notify_set; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + } + + /* Setup notify timer when using MSI interrupts */ +@@@ -844,113 -910,122 +1067,226 @@@ + + /* allocate RQ buffers and post them to RQ*/ + for (i = 0; i < fnic->rq_count; i++) { + + 
vnic_rq_enable(&fnic->rq[i]); + err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); + if (err) { + - dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc " + + shost_printk(KERN_ERR, fnic->lport->host, + + "fnic_alloc_rq_frame can't alloc " + "frame\n"); +++<<<<<<< HEAD + + goto err_out_free_rq_buf; + + } + + } + + + + /* + + * Initialization done with PCI system, hardware, firmware. + + * Add host to SCSI + + */ + + err = scsi_add_host(lp->host, &pdev->dev); + + if (err) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "fnic: scsi_add_host failed...exiting\n"); + + goto err_out_free_rq_buf; + + } + + + + /* Start local port initiatialization */ + + + + lp->link_up = 0; + + + + lp->max_retry_count = fnic->config.flogi_retries; + + lp->max_rport_retry_count = fnic->config.plogi_retries; + + lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + + FCP_SPPF_CONF_COMPL); + + if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + + lp->service_params |= FCP_SPPF_RETRY; + + + + lp->boot_time = jiffies; + + lp->e_d_tov = fnic->config.ed_tov; + + lp->r_a_tov = fnic->config.ra_tov; + + lp->link_supported_speeds = FC_PORTSPEED_10GBIT; + + fc_set_wwnn(lp, fnic->config.node_wwn); + + fc_set_wwpn(lp, fnic->config.port_wwn); + + + + fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); + + + + if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, + + FCPIO_HOST_EXCH_RANGE_END, NULL)) { + + err = -ENOMEM; + + goto err_out_remove_scsi_host; + + } + + + + fc_lport_init_stats(lp); + + fnic->stats_reset_time = jiffies; + + + + fc_lport_config(lp); + + + + if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + + + sizeof(struct fc_frame_header))) { + + err = -EINVAL; + + goto err_out_free_exch_mgr; + + } + + fc_host_maxframe_size(lp->host) = lp->mfs; + + fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; + + + + sprintf(fc_host_symbolic_name(lp->host), + + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); +++======= ++ goto err_out_alloc_rq_buf; ++ } ++ } ++ ++ init_completion(&fnic->reset_completion_wait); ++ ++ /* Start local port initialization */ ++ iport->max_flogi_retries = fnic->config.flogi_retries; ++ iport->max_plogi_retries = fnic->config.plogi_retries; ++ iport->plogi_timeout = fnic->config.plogi_timeout; ++ iport->service_params = ++ (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS | ++ FNIC_FCP_SP_CONF_CMPL); ++ if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) ++ iport->service_params |= FNIC_FCP_SP_RETRY; ++ ++ iport->boot_time = jiffies; ++ iport->e_d_tov = fnic->config.ed_tov; ++ iport->r_a_tov = fnic->config.ra_tov; ++ iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT; ++ iport->wwpn = fnic->config.port_wwn; ++ iport->wwnn = fnic->config.node_wwn; ++ ++ iport->max_payload_size = fnic->config.maxdatafieldsize; ++ ++ if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) || ++ (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) || ++ ((iport->max_payload_size % 4) != 0)) { ++ iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN; ++ } ++ ++ iport->flags |= FNIC_FIRST_LINK_UP; ++ ++ timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback, ++ 0); ++ ++ fnic->stats_reset_time = jiffies; ++ ++ INIT_WORK(&fnic->link_work, fnic_handle_link); ++ INIT_WORK(&fnic->frame_work, fnic_handle_frame); ++ INIT_WORK(&fnic->tport_work, fnic_tport_event_handler); ++ INIT_WORK(&fnic->flush_work, fnic_flush_tx); ++ ++ INIT_LIST_HEAD(&fnic->frame_queue); ++ INIT_LIST_HEAD(&fnic->tx_queue); ++ INIT_LIST_HEAD(&fnic->tport_event_list); ++ ++ 
INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry, ++ fdls_schedule_oxid_free_retry_work); ++ ++ /* Initialize the oxid reclaim list and work struct */ ++ INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list); ++ INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler); ++ ++ /* Enable all queues */ ++ for (i = 0; i < fnic->raw_wq_count; i++) ++ vnic_wq_enable(&fnic->wq[i]); ++ for (i = 0; i < fnic->rq_count; i++) { ++ if (!ioread32(&fnic->rq[i].ctrl->enable)) ++ vnic_rq_enable(&fnic->rq[i]); ++ } ++ for (i = 0; i < fnic->wq_copy_count; i++) ++ vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); ++ ++ vnic_dev_enable(fnic->vdev); ++ ++ err = fnic_request_intr(fnic); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "Unable to request irq.\n"); ++ goto err_out_fnic_request_intr; ++ } ++ ++ fnic_notify_timer_start(fnic); ++ ++ fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE)); ++ ++ if (IS_FNIC_FCP_INITIATOR(fnic) && fnic_scsi_drv_init(fnic)) ++ goto err_out_scsi_drv_init; ++ ++ err = fnic_stats_debugfs_init(fnic); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n"); ++ goto err_out_free_stats_debugfs; ++ } ++ ++ for (i = 0; i < fnic->intr_count; i++) ++ vnic_intr_unmask(&fnic->intr[i]); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + + spin_lock_irqsave(&fnic_list_lock, flags); + list_add_tail(&fnic->list, &fnic_list); + spin_unlock_irqrestore(&fnic_list_lock, flags); + +++<<<<<<< HEAD + + INIT_WORK(&fnic->link_work, fnic_handle_link); + + INIT_WORK(&fnic->frame_work, fnic_handle_frame); + + INIT_WORK(&fnic->flush_work, fnic_flush_tx); + + skb_queue_head_init(&fnic->frame_queue); + + skb_queue_head_init(&fnic->tx_queue); + + + + /* Enable all queues */ + + for (i = 0; i < fnic->raw_wq_count; i++) + + vnic_wq_enable(&fnic->wq[i]); + + for (i = 0; i < fnic->wq_copy_count; i++) + + vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); + + + + fc_fabric_login(lp); + + + + err = fnic_request_intr(fnic); + + if (err) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "Unable to request irq.\n"); + + goto err_out_free_exch_mgr; + + } + + + + vnic_dev_enable(fnic->vdev); + + + + for (i = 0; i < fnic->intr_count; i++) + + vnic_intr_unmask(&fnic->intr[i]); + + + + fnic_notify_timer_start(fnic); + + + + return 0; + + + +err_out_free_exch_mgr: + + fc_exch_mgr_free(lp); + +err_out_remove_scsi_host: + + fc_remove_host(lp->host); + + scsi_remove_host(lp->host); + +err_out_free_rq_buf: + + for (i = 0; i < fnic->rq_count; i++) + + vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + + vnic_dev_notify_unset(fnic->vdev); + +err_out_free_max_pool: +++======= ++ return 0; ++ ++ err_out_free_stats_debugfs: ++ fnic_stats_debugfs_remove(fnic); ++ scsi_remove_host(fnic->lport->host); ++ err_out_scsi_drv_init: ++ fnic_free_intr(fnic); ++ err_out_fnic_request_intr: ++ err_out_alloc_rq_buf: ++ for (i = 0; i < fnic->rq_count; i++) { ++ if (ioread32(&fnic->rq[i].ctrl->enable)) ++ vnic_rq_disable(&fnic->rq[i]); ++ vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); ++ } ++ vnic_dev_notify_unset(fnic->vdev); ++ err_out_fnic_notify_set: ++ mempool_destroy(fnic->frame_elem_pool); ++ err_out_fdls_frame_elem_pool: ++ mempool_destroy(fnic->frame_pool); ++ err_out_fdls_frame_pool: +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); + err_out_free_dflt_pool: + mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); +@@@ -983,15 -1064,16 +1325,19 @@@ err_out_fnic_alloc + static 
void fnic_remove(struct pci_dev *pdev) + { + struct fnic *fnic = pci_get_drvdata(pdev); +- struct fc_lport *lp = fnic->lport; + unsigned long flags; +++<<<<<<< HEAD +++======= + + /* +- * Mark state so that the workqueue thread stops forwarding +- * received frames and link events to the local port. ISR and +- * other threads that can queue work items will also stop +- * creating work items on the fnic workqueue ++ * Sometimes when probe() fails and do not exit with an error code, ++ * remove() gets called with 'drvdata' not set. Avoid a crash by ++ * adding a defensive check. + */ ++ if (!fnic) ++ return; +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) ++ + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->stop_rx_link_events = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +@@@ -1004,29 -1083,26 +1347,44 @@@ + * be no event queued for this fnic device in the workqueue + */ + flush_workqueue(fnic_event_queue); +++<<<<<<< HEAD + + skb_queue_purge(&fnic->frame_queue); + + skb_queue_purge(&fnic->tx_queue); +++======= ++ ++ if (IS_FNIC_FCP_INITIATOR(fnic)) ++ fnic_scsi_unload(fnic); ++ ++ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) ++ del_timer_sync(&fnic->notify_timer); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + - del_timer_sync(&fnic->retry_fip_timer); + - del_timer_sync(&fnic->fcs_ka_timer); + - del_timer_sync(&fnic->enode_ka_timer); + - del_timer_sync(&fnic->vn_ka_timer); + - + - fnic_free_txq(&fnic->fip_frame_queue); + + del_timer_sync(&fnic->fip_timer); + + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + + fnic_fcoe_evlist_free(fnic); + } + +++<<<<<<< HEAD + + /* + + * Log off the fabric. This stops all remote ports, dns port, + + * logs off the fabric. 
This flushes all rport, disc, lport work + + * before returning + + */ + + fc_fabric_logoff(fnic->lport); + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + fnic->in_remove = 1; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + fcoe_ctlr_destroy(&fnic->ctlr); + + fc_lport_destroy(lp); +++======= ++ if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) ++ del_timer_sync(&fnic->iport.fabric.fdmi_timer); ++ +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + fnic_stats_debugfs_remove(fnic); + + /* +@@@ -1043,9 -1116,9 +1401,15 @@@ + list_del(&fnic->list); + spin_unlock_irqrestore(&fnic_list_lock, flags); + +++<<<<<<< HEAD + + fc_remove_host(fnic->lport->host); + + scsi_remove_host(fnic->lport->host); + + fc_exch_mgr_free(fnic->lport); +++======= ++ fnic_free_txq(&fnic->frame_queue); ++ fnic_free_txq(&fnic->tx_queue); ++ +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + vnic_dev_notify_unset(fnic->vdev); + fnic_free_intr(fnic); + fnic_free_vnic_resources(fnic); +diff --cc drivers/scsi/fnic/fnic_res.c +index f7c2ee009426,763475587b7f..000000000000 +--- a/drivers/scsi/fnic/fnic_res.c ++++ b/drivers/scsi/fnic/fnic_res.c +@@@ -151,40 -142,28 +156,65 @@@ int fnic_get_vnic_config(struct fnic *f + + c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count); + +++<<<<<<< HEAD + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC MAC addr %pM " + + "wq/wq_copy/rq %d/%d/%d\n", + + fnic->ctlr.ctl_src_addr, + + c->wq_enet_desc_count, c->wq_copy_desc_count, + + c->rq_desc_count); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC node wwn %llx port wwn %llx\n", + + c->node_wwn, c->port_wwn); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC ed_tov %d ra_tov %d\n", + + c->ed_tov, c->ra_tov); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC mtu %d intr timer %d\n", + + c->maxdatafieldsize, c->intr_timer); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC flags 0x%x luns per tgt %d\n", + + c->flags, c->luns_per_tgt); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC flogi_retries %d flogi timeout %d\n", + + c->flogi_retries, c->flogi_timeout); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC plogi retries %d plogi timeout %d\n", + + c->plogi_retries, c->plogi_timeout); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC io throttle count %d link dn timeout %d\n", + + c->io_throttle_count, c->link_down_timeout); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC port dn io retries %d port dn timeout %d\n", + + c->port_down_io_retries, c->port_down_timeout); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC wq_copy_count: %d\n", c->wq_copy_count); + + shost_printk(KERN_INFO, fnic->lport->host, + + "vNIC intr mode: %d\n", c->intr_mode); +++======= ++ dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n", ++ fnic->data_src_addr, ++ c->wq_enet_desc_count, c->wq_copy_desc_count, ++ c->rq_desc_count); ++ dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n", ++ c->node_wwn, c->port_wwn); ++ dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n", ++ c->ed_tov, c->ra_tov); ++ dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n", ++ c->maxdatafieldsize, c->intr_timer); ++ dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n", ++ c->flags, c->luns_per_tgt); ++ dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n", ++ c->flogi_retries, c->flogi_timeout); ++ dev_info(&fnic->pdev->dev, "fNIC plogi retries %d 
plogi timeout %d\n", ++ c->plogi_retries, c->plogi_timeout); ++ dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n", ++ c->io_throttle_count, c->link_down_timeout); ++ dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n", ++ c->port_down_io_retries, c->port_down_timeout); ++ dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count); ++ dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode); +++>>>>>>> 9243626c211e (scsi: fnic: Modify fnic interfaces to use FDLS) + + return 0; + } +* Unmerged path drivers/scsi/fnic/fnic.h +diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c +index 6e6e125207e0..f6fef1c02ef2 100644 +--- a/drivers/scsi/fnic/fnic_attrs.c ++++ b/drivers/scsi/fnic/fnic_attrs.c +@@ -23,8 +23,8 @@ + static ssize_t fnic_show_state(struct device *dev, + struct device_attribute *attr, char *buf) + { +- struct fc_lport *lp = shost_priv(class_to_shost(dev)); +- struct fnic *fnic = lport_priv(lp); ++ struct fnic *fnic = ++ *((struct fnic **) shost_priv(class_to_shost(dev))); + + return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]); + } +@@ -38,9 +38,13 @@ static ssize_t fnic_show_drv_version(struct device *dev, + static ssize_t fnic_show_link_state(struct device *dev, + struct device_attribute *attr, char *buf) + { +- struct fc_lport *lp = shost_priv(class_to_shost(dev)); ++ struct fnic *fnic = ++ *((struct fnic **) shost_priv(class_to_shost(dev))); + +- return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down"); ++ return sysfs_emit(buf, "%s\n", ++ ((fnic->iport.state != FNIC_IPORT_STATE_INIT) && ++ (fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ? ++ "Link Up" : "Link Down"); + } + + static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); +diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c +index 1a4151ef90c1..892f48d1a91c 100644 +--- a/drivers/scsi/fnic/fnic_debugfs.c ++++ b/drivers/scsi/fnic/fnic_debugfs.c +@@ -21,6 +21,9 @@ + #include + #include "fnic.h" + ++extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer, ++ struct fnic *fnic); ++ + static struct dentry *fnic_trace_debugfs_root; + static struct dentry *fnic_trace_debugfs_file; + static struct dentry *fnic_trace_enable; +@@ -607,6 +610,7 @@ static int fnic_stats_debugfs_open(struct inode *inode, + debug->buf_size = buf_size; + memset((void *)debug->debug_buffer, 0, buf_size); + debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); ++ debug->buffer_len += fnic_get_debug_info(debug, fnic); + + file->private_data = debug; + +@@ -687,26 +691,48 @@ static const struct file_operations fnic_reset_debugfs_fops = { + * It will create file stats and reset_stats under statistics/host# directory + * to log per fnic stats. 
+ */ +-void fnic_stats_debugfs_init(struct fnic *fnic) ++int fnic_stats_debugfs_init(struct fnic *fnic) + { ++ int rc = -1; + char name[16]; + + snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); + ++ if (!fnic_stats_debugfs_root) { ++ pr_debug("fnic_stats root doesn't exist\n"); ++ return rc; ++ } ++ + fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, + fnic_stats_debugfs_root); + ++ if (!fnic->fnic_stats_debugfs_host) { ++ pr_debug("Cannot create host directory\n"); ++ return rc; ++ } ++ + fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_stats_debugfs_fops); + ++ if (!fnic->fnic_stats_debugfs_file) { ++ pr_debug("Cannot create host stats file\n"); ++ return rc; ++ } ++ + fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_reset_debugfs_fops); ++ if (!fnic->fnic_reset_debugfs_file) { ++ pr_debug("Cannot create host stats file\n"); ++ return rc; ++ } ++ rc = 0; ++ return rc; + } + + /* +* Unmerged path drivers/scsi/fnic/fnic_fcs.c +* Unmerged path drivers/scsi/fnic/fnic_main.c +* Unmerged path drivers/scsi/fnic/fnic_res.c +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c +index 321954ca143f..aad571cd3f3f 100644 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@ -1707,6 +1707,45 @@ void fnic_terminate_rport_io(struct fc_rport *rport) + fnic_rport_exch_reset(fnic, rport->port_id); + } + ++/* ++ * FCP-SCSI specific handling for module unload ++ * ++ */ ++void fnic_scsi_unload(struct fnic *fnic) ++{ ++ unsigned long flags; ++ ++ /* ++ * Mark state so that the workqueue thread stops forwarding ++ * received frames and link events to the local port. ISR and ++ * other threads that can queue work items will also stop ++ * creating work items on the fnic workqueue ++ */ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) ++ fnic_scsi_fcpio_reset(fnic); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ fnic->in_remove = 1; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ fnic_flush_tport_event_list(fnic); ++ fnic_delete_fcp_tports(fnic); ++} ++ ++void fnic_scsi_unload_cleanup(struct fnic *fnic) ++{ ++ int hwq = 0; ++ ++ fc_remove_host(fnic->host); ++ scsi_remove_host(fnic->host); ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) ++ kfree(fnic->sw_copy_wq[hwq].io_req_table); ++} ++ + /* + * This function is exported to SCSI for sending abort cmnds. + * A SCSI IO is represented by a io_req in the driver. 
+diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h +index ca7ab8afa60a..324201ac7d45 100644 +--- a/drivers/scsi/fnic/fnic_stats.h ++++ b/drivers/scsi/fnic/fnic_stats.h +@@ -17,6 +17,7 @@ + #ifndef _FNIC_STATS_H_ + #define _FNIC_STATS_H_ + #define FNIC_MQ_MAX_QUEUES 64 ++#include + + struct stats_timestamps { + struct timespec64 last_reset_time; +@@ -130,6 +131,7 @@ struct fnic_stats { + struct reset_stats reset_stats; + struct fw_stats fw_stats; + struct vlan_stats vlan_stats; ++ struct fc_host_statistics host_stats; + struct misc_stats misc_stats; + }; + +diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c +index a1d62546c584..7922435e90eb 100644 +--- a/drivers/scsi/fnic/fnic_trace.c ++++ b/drivers/scsi/fnic/fnic_trace.c +@@ -472,6 +472,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug, + + } + ++int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic) ++{ ++ /* Placeholder function */ ++ return 0; ++} ++ + /* + * fnic_trace_buf_init - Initialize fnic trace buffer logging facility + * diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/924cb24d.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/924cb24d.failed new file mode 100644 index 0000000000000..a39586fa04d0c --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/924cb24d.failed @@ -0,0 +1,242 @@ +scsi: fnic: Stop using the SCSI pointer + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Bart Van Assche +commit 924cb24df4fc4d08d32fcb42fa967fdc3f2137cb +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/924cb24d.failed + +Set .cmd_size in the SCSI host template instead of using the SCSI pointer +from struct scsi_cmnd. This patch prepares for removal of the SCSI pointer +from struct scsi_cmnd. + +Link: https://lore.kernel.org/r/20220218195117.25689-23-bvanassche@acm.org + Reviewed-by: Johannes Thumshirn + Reviewed-by: Himanshu Madhani + Reviewed-by: Hannes Reinecke + Signed-off-by: Bart Van Assche + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit 924cb24df4fc4d08d32fcb42fa967fdc3f2137cb) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 573859282533,3c00e5b88350..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -557,10 -558,9 +557,16 @@@ static int fnic_queuecommand_lck(struc + io_lock_acquired = 1; + io_req->port_id = rport->port_id; + io_req->start_time = jiffies; +++<<<<<<< HEAD + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_SP(sc) = (char *)io_req; + + CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED; + + sc->scsi_done = done; +++======= ++ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; ++ fnic_priv(sc)->io_req = io_req; ++ fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; +++>>>>>>> 924cb24df4fc (scsi: fnic: Stop using the SCSI pointer) + + /* create copy wq desc and enqueue it */ + wq = &fnic->wq_copy[0]; +@@@ -983,9 -982,11 +988,9 @@@ static void fnic_fcpio_icmnd_cmpl_handl + } + + /* Break link with the SCSI command */ +- CMD_SP(sc) = NULL; +- CMD_FLAGS(sc) |= FNIC_IO_DONE; ++ fnic_priv(sc)->io_req = NULL; ++ fnic_priv(sc)->flags |= FNIC_IO_DONE; + + - spin_unlock_irqrestore(io_lock, flags); + - + if (hdr_status != FCPIO_SUCCESS) { + atomic64_inc(&fnic_stats->io_stats.io_failures); + shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", +@@@ -1192,34 -1190,30 +1196,54 @@@ static void fnic_fcpio_itmf_cmpl_handle + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); +++<<<<<<< HEAD + + if (sc->scsi_done) { + + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + + sc->device->host->host_no, id, + + sc, + + jiffies_to_msecs(jiffies - start_time), + + desc, + + (((u64)hdr_status << 40) | + + (u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | + + (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | + + CMD_STATE(sc))); + + sc->scsi_done(sc); + + atomic64_dec(&fnic_stats->io_stats.active_ios); + + if (atomic64_read(&fnic->io_cmpl_skip)) + + atomic64_dec(&fnic->io_cmpl_skip); + + else + + atomic64_inc(&fnic_stats->io_stats.io_completions); + + } +++======= ++ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, ++ sc->device->host->host_no, id, ++ sc, ++ jiffies_to_msecs(jiffies - start_time), ++ desc, ++ (((u64)hdr_status << 40) | ++ (u64)sc->cmnd[0] << 32 | ++ (u64)sc->cmnd[2] << 24 | ++ (u64)sc->cmnd[3] << 16 | ++ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), ++ fnic_flags_and_state(sc)); ++ scsi_done(sc); ++ atomic64_dec(&fnic_stats->io_stats.active_ios); ++ if (atomic64_read(&fnic->io_cmpl_skip)) ++ atomic64_dec(&fnic->io_cmpl_skip); ++ else ++ atomic64_inc(&fnic_stats->io_stats.io_completions); +++>>>>>>> 924cb24df4fc (scsi: fnic: Stop using the SCSI pointer) + } + + + } else if (id & FNIC_TAG_DEV_RST) { + /* Completion of device reset */ +- CMD_LR_STATUS(sc) = hdr_status; +- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { ++ fnic_priv(sc)->lr_status = hdr_status; ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); +- CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING; ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), +@@@ -1419,23 -1412,22 +1441,41 @@@ cleanup_scsi_cmd + atomic64_inc(&fnic_stats->io_stats.io_completions); + + /* Complete the command to SCSI */ +++<<<<<<< HEAD + + if (sc->scsi_done) { + + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) + + 
shost_printk(KERN_ERR, fnic->lport->host, + + "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", + + tag, sc); + + + + FNIC_TRACE(fnic_cleanup_io, + + sc->device->host->host_no, tag, sc, + + jiffies_to_msecs(jiffies - start_time), + + 0, ((u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | + + (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); +++======= ++ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) ++ shost_printk(KERN_ERR, fnic->lport->host, ++ "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", ++ tag, sc); ++ ++ FNIC_TRACE(fnic_cleanup_io, ++ sc->device->host->host_no, tag, sc, ++ jiffies_to_msecs(jiffies - start_time), ++ 0, ((u64)sc->cmnd[0] << 32 | ++ (u64)sc->cmnd[2] << 24 | ++ (u64)sc->cmnd[3] << 16 | ++ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), ++ fnic_flags_and_state(sc)); ++ ++ scsi_done(sc); +++>>>>>>> 924cb24df4fc (scsi: fnic: Stop using the SCSI pointer) + + + sc->scsi_done(sc); + + } + return true; + } + +@@@ -1493,17 -1485,15 +1533,27 @@@ wq_copy_cleanup_scsi_cmd + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" + " DID_NO_CONNECT\n"); + +++<<<<<<< HEAD + + if (sc->scsi_done) { + + FNIC_TRACE(fnic_wq_copy_cleanup_handler, + + sc->device->host->host_no, id, sc, + + jiffies_to_msecs(jiffies - start_time), + + 0, ((u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); +++======= ++ FNIC_TRACE(fnic_wq_copy_cleanup_handler, ++ sc->device->host->host_no, id, sc, ++ jiffies_to_msecs(jiffies - start_time), ++ 0, ((u64)sc->cmnd[0] << 32 | ++ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | ++ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), ++ fnic_flags_and_state(sc)); +++>>>>>>> 924cb24df4fc (scsi: fnic: Stop using the SCSI pointer) + + - scsi_done(sc); + + sc->scsi_done(sc); + + } + } + + static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index 69f373b53132..e82eb0ef2605 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -89,15 +89,28 @@ + #define FNIC_DEV_RST_ABTS_PENDING BIT(21) + + /* +- * Usage of the scsi_cmnd scratchpad. ++ * fnic private data per SCSI command. + * These fields are locked by the hashed io_req_lock. 
+ */ +-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) +-#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) +-#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) +-#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) +-#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) +-#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) ++struct fnic_cmd_priv { ++ struct fnic_io_req *io_req; ++ enum fnic_ioreq_state state; ++ u32 flags; ++ u16 abts_status; ++ u16 lr_status; ++}; ++ ++static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) ++{ ++ return scsi_cmd_priv(cmd); ++} ++ ++static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) ++{ ++ struct fnic_cmd_priv *fcmd = fnic_priv(cmd); ++ ++ return ((u64)fcmd->flags << 32) | fcmd->state; ++} + + #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c +index d09d15fff065..f3eec17f5a7f 100644 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@ -124,6 +124,7 @@ static struct scsi_host_template fnic_host_template = { + .max_sectors = 0xffff, + .shost_attrs = fnic_attrs, + .track_queue_depth = 1, ++ .cmd_size = sizeof(struct fnic_cmd_priv), + }; + + static void +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9a71892c.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9a71892c.failed new file mode 100644 index 0000000000000..720ee8815d94e --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9a71892c.failed @@ -0,0 +1,80 @@ +Revert "driver core: Fix uevent_show() vs driver detach race" + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Greg Kroah-Hartman +commit 9a71892cbcdb9d1459c84f5a4c722b14354158a5 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9a71892c.failed + +This reverts commit 15fffc6a5624b13b428bb1c6e9088e32a55eb82c. + +This commit causes a regression, so revert it for now until it can come +back in a way that works for everyone. + +Link: https://lore.kernel.org/all/172790598832.1168608.4519484276671503678.stgit@dwillia2-xfh.jf.intel.com/ +Fixes: 15fffc6a5624 ("driver core: Fix uevent_show() vs driver detach race") + Cc: stable + Cc: Ashish Sangwan + Cc: Namjae Jeon + Cc: Dirk Behme + Cc: Greg Kroah-Hartman + Cc: Rafael J. 
Wysocki + Cc: Dan Williams + Signed-off-by: Greg Kroah-Hartman +(cherry picked from commit 9a71892cbcdb9d1459c84f5a4c722b14354158a5) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/base/core.c +diff --cc drivers/base/core.c +index b2fe401abd4a,adc0d74fa96c..000000000000 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@@ -22,16 -25,13 +22,20 @@@ + #include + #include + #include + +#include + +#include + #include +++<<<<<<< HEAD + +#include + +#include +++======= ++ #include +++>>>>>>> 9a71892cbcdb (Revert "driver core: Fix uevent_show() vs driver detach race") + #include + -#include + -#include + +#include + #include + #include + +#include /* for dma_default_coherent */ + + #include "base.h" + #include "physical_location.h" +* Unmerged path drivers/base/core.c +diff --git a/drivers/base/module.c b/drivers/base/module.c +index 851cc5367c04..46ad4d636731 100644 +--- a/drivers/base/module.c ++++ b/drivers/base/module.c +@@ -7,7 +7,6 @@ + #include + #include + #include +-#include + #include "base.h" + + static char *make_driver_name(struct device_driver *drv) +@@ -78,9 +77,6 @@ void module_remove_driver(struct device_driver *drv) + if (!drv) + return; + +- /* Synchronize with dev_uevent() */ +- synchronize_rcu(); +- + sysfs_remove_link(&drv->p->kobj, "module"); + + if (drv->owner) diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9ae7563e.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9ae7563e.failed new file mode 100644 index 0000000000000..054af0dc1dc5c --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9ae7563e.failed @@ -0,0 +1,32 @@ +scsi: fnic: Fix indentation and remove unnecessary parenthesis + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 9ae7563e270372f401a06486a92cdf151d1b27ee +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9ae7563e.failed + +Fix indentation in fdls_disc.c to fix kernel test robot warnings. +Remove unnecessary parentheses to fix checkpatch check. + + Reported-by: kernel test robot +Closes: https://lore.kernel.org/r/202502141403.1PcpwyJp-lkp@intel.com/ + Reported-by: Dan Carpenter +Closes: https://lore.kernel.org/r/202502141403.1PcpwyJp-lkp@intel.com/ +Fixes: a63e78eb2b0f ("scsi: fnic: Add support for fabric based solicited requests and responses") + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250225215013.4875-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 9ae7563e270372f401a06486a92cdf151d1b27ee) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b2d1ecf.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b2d1ecf.failed new file mode 100644 index 0000000000000..9d8d78d5b1558 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b2d1ecf.failed @@ -0,0 +1,30 @@ +scsi: fnic: Remove unnecessary debug print + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 9b2d1ecf8797a82371c9f9209722949fb35b4d15 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. 
+Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b2d1ecf.failed + +Remove unnecessary debug print from fdls_schedule_oxid_free_retry_work. As +suggested by Dan, this information is already present in stack traces, and +the kernel is not expected to fail small allocations. + + Suggested-by: Dan Carpenter +Fixes: a63e78eb2b0f ("scsi: fnic: Add support for fabric based solicited requests and responses") + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250225214909.4853-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 9b2d1ecf8797a82371c9f9209722949fb35b4d15) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b9b8594.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b9b8594.failed new file mode 100644 index 0000000000000..a3755ee0b6dab --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b9b8594.failed @@ -0,0 +1,29 @@ +scsi: fnic: Add and improve logs in FDMI and FDMI ABTS paths + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 9b9b8594654a79e3d4166356fd86cd5397477b24 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9b9b8594.failed + +Add logs in FDMI and FDMI ABTS paths. + +Modify log text in these paths. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Reviewed-by: John Meneghini + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250618003431.6314-3-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 9b9b8594654a79e3d4166356fd86cd5397477b24) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9cf9fe2f.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9cf9fe2f.failed new file mode 100644 index 0000000000000..1623d77a39578 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9cf9fe2f.failed @@ -0,0 +1,1280 @@ +scsi: fnic: Add functionality in fnic to support FDLS + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit 9cf9fe2f3ec5dad8b459267a9e977c0b7811b3f8 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/9cf9fe2f.failed + +Add interfaces in fnic to use FDLS services. + +Modify link up and link down functionality to use FDLS. + +Replace existing interfaces to handle new functionality provided by FDLS. + +Modify data types of some data members to handle new functionality. + +Add processing of tports and handling of tports. 
+ + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202409292037.ZYWZwIK6-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela +Co-developed-by: Gian Carlo Boffa + Signed-off-by: Gian Carlo Boffa +Co-developed-by: Arulprabhu Ponnusamy + Signed-off-by: Arulprabhu Ponnusamy +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-10-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit 9cf9fe2f3ec5dad8b459267a9e977c0b7811b3f8) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fip.c +# drivers/scsi/fnic/fnic.h +# drivers/scsi/fnic/fnic_fcs.c +# drivers/scsi/fnic/fnic_main.c +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,0f92d57e0aac..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -88,16 -82,101 +88,91 @@@ + #define FNIC_DEV_RST_TERM_DONE BIT(20) + #define FNIC_DEV_RST_ABTS_PENDING BIT(21) + +++<<<<<<< HEAD +++======= ++ #define IS_FNIC_FCP_INITIATOR(fnic) (fnic->role == FNIC_ROLE_FCP_INITIATOR) ++ ++ #define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */ ++ /* Retry supported by rport (returned by PRLI service parameters) */ ++ #define FNIC_FC_RP_FLAGS_RETRY 0x1 ++ ++ /* Cisco vendor id */ ++ #define PCI_VENDOR_ID_CISCO 0x1137 ++ #define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */ ++ ++ /* sereno pcie switch */ ++ #define PCI_DEVICE_ID_CISCO_SERENO 0x004e ++ #define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */ ++ #define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */ ++ #define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */ ++ ++ /* Sereno */ ++ #define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */ ++ #define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */ ++ #define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */ ++ #define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */ ++ ++ /* Cruz */ ++ #define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */ ++ /* Cruz MountTian SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b ++ #define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */ ++ /* Cruz MountTian2 SIOC */ ++ #define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157 ++ #define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */ ++ ++ /* Bodega */ ++ /* VIC 1457 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218 ++ #define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */ ++ /* VIC 1487 PCIe mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a ++ #define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */ ++ /* VIC 1440 Mezz mLOM */ ++ #define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215 ++ #define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */ ++ #define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */ ++ #define 
PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */ ++ ++ /* Beverly */ ++ #define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */ ++ #define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */ ++ #define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */ ++ #define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */ ++ #define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */ ++ #define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */ ++ #define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */ ++ #define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */ ++ #define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */ ++ #define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */ ++ ++ struct fnic_pcie_device { ++ u32 device; ++ u8 *desc; ++ u32 subsystem_device; ++ u8 *subsys_desc; ++ }; ++ +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + /* + - * fnic private data per SCSI command. + + * Usage of the scsi_cmnd scratchpad. + * These fields are locked by the hashed io_req_lock. + */ + -struct fnic_cmd_priv { + - struct fnic_io_req *io_req; + - enum fnic_ioreq_state state; + - u32 flags; + - u16 abts_status; + - u16 lr_status; + -}; + - + -static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) + -{ + - return scsi_cmd_priv(cmd); + -} + - + -static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) + -{ + - struct fnic_cmd_priv *fcmd = fnic_priv(cmd); + - + - return ((u64)fcmd->flags << 32) | fcmd->state; + -} + +#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) + +#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) + +#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) + +#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) + +#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) + +#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) + + #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +@@@ -127,8 -206,16 +202,18 @@@ + #define fnic_clear_state_flags(fnicp, st_flags) \ + __fnic_set_state_flags(fnicp, st_flags, 1) + +++<<<<<<< HEAD +++======= ++ enum reset_states { ++ NOT_IN_PROGRESS = 0, ++ IN_PROGRESS, ++ RESET_ERROR ++ }; ++ ++ extern unsigned int fnic_fdmi_support; +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + extern unsigned int fnic_log_level; + extern unsigned int io_completions; + -extern struct workqueue_struct *fnic_event_queue; + + #define FNIC_MAIN_LOGGING 0x01 + #define FNIC_FCS_LOGGING 0x02 +@@@ -249,6 -364,10 +334,13 @@@ struct fnic + unsigned int wq_count; + unsigned int cq_count; + +++<<<<<<< HEAD +++======= ++ struct completion reset_completion_wait; ++ struct mutex sgreset_mutex; ++ spinlock_t sgreset_lock; /* lock for sgreset */ ++ struct scsi_cmnd *sgreset_sc; +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + struct dentry *fnic_stats_debugfs_host; + struct dentry *fnic_stats_debugfs_file; + struct dentry *fnic_reset_debugfs_file; +@@@ -298,8 -420,15 +393,20 @@@ + struct work_struct link_work; + struct work_struct frame_work; + struct work_struct flush_work; +++<<<<<<< HEAD + + struct sk_buff_head frame_queue; + + struct sk_buff_head tx_queue; +++======= ++ struct list_head frame_queue; ++ struct list_head tx_queue; ++ mempool_t *frame_pool; ++ mempool_t *frame_elem_pool; ++ struct work_struct tport_work; ++ struct list_head tport_event_list; ++ ++ char subsys_desc[14]; ++ int subsys_desc_len; +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to 
support FDLS) + + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); +@@@ -351,8 -482,12 +458,9 @@@ int fnic_request_intr(struct fnic *fnic + int fnic_send(struct fc_lport *, struct fc_frame *); + void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); + void fnic_handle_frame(struct work_struct *work); ++ void fnic_tport_event_handler(struct work_struct *work); + void fnic_handle_link(struct work_struct *work); + void fnic_handle_event(struct work_struct *work); + -void fdls_reclaim_oxid_handler(struct work_struct *work); + -void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid); + -void fdls_schedule_oxid_free_retry_work(struct work_struct *work); + int fnic_rq_cmpl_handler(struct fnic *fnic, int); + int fnic_alloc_rq_frame(struct vnic_rq *rq); + void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); +@@@ -398,4 -530,79 +507,82 @@@ fnic_chk_state_flags_locked(struct fni + } + void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); + void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +++<<<<<<< HEAD +++======= ++ void fnic_free_txq(struct list_head *head); ++ int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, ++ char **subsys_desc); ++ void fnic_fdls_link_status_change(struct fnic *fnic, int linkup); ++ void fnic_delete_fcp_tports(struct fnic *fnic); ++ void fnic_flush_tport_event_list(struct fnic *fnic); ++ ++ struct fnic_scsi_iter_data { ++ struct fnic *fnic; ++ void *data1; ++ void *data2; ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2); ++ }; ++ ++ static inline bool ++ fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) ++ { ++ struct fnic_scsi_iter_data *iter = iter_data; ++ ++ return iter->fn(iter->fnic, sc, iter->data1, iter->data2); ++ } ++ ++ static inline void ++ fnic_scsi_io_iter(struct fnic *fnic, ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2), ++ void *data1, void *data2) ++ { ++ struct fnic_scsi_iter_data iter_data = { ++ .fn = fn, ++ .fnic = fnic, ++ .data1 = data1, ++ .data2 = data2, ++ }; ++ scsi_host_busy_iter(fnic->lport->host, fnic_io_iter_handler, &iter_data); ++ } ++ ++ #ifdef FNIC_DEBUG ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) ++ { ++ int i; ++ ++ for (i = 0; i < len; i = i+8) { ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, ++ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], ++ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); ++ } ++ } ++ ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ int len, char *pfx) ++ { ++ uint32_t s_id, d_id; ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = ntoh24(fchdr->fh_d_id); ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", ++ pfx, s_id, d_id, fchdr->fh_type, ++ FNIC_STD_GET_OX_ID(fchdr), len); ++ ++ fnic_debug_dump(fnic, (uint8_t *)fchdr, len); ++ ++ } ++ #else /* FNIC_DEBUG */ ++ static inline void ++ fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {} ++ static inline void ++ fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ uint32_t len, char *pfx) {} ++ #endif /* FNIC_DEBUG */ +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + #endif /* _FNIC_H_ */ +diff --cc 
drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3,b2669f2ddb53..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -26,25 -14,136 +26,147 @@@ + #include + #include + #include + +#include + #include + #include + -#include + #include "fnic_io.h" + #include "fnic.h" + -#include "fnic_fdls.h" + -#include "fdls_fc.h" + +#include "fnic_fip.h" + #include "cq_enet_desc.h" + #include "cq_exch_desc.h" + -#include "fip.h" + +++<<<<<<< HEAD + +static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; + +struct workqueue_struct *fnic_fip_queue; + +struct workqueue_struct *fnic_event_queue; + + + +static void fnic_set_eth_mode(struct fnic *); + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); +++======= ++ #define MAX_RESET_WAIT_COUNT 64 ++ ++ extern struct workqueue_struct *fnic_fip_queue; ++ struct workqueue_struct *fnic_event_queue; ++ ++ static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; ++ ++ /* ++ * Internal Functions ++ * This function will initialize the src_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, ++ uint8_t *src_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ src_mac[0], src_mac[1], src_mac[2], src_mac[3], ++ src_mac[4], src_mac[5]); ++ ++ memcpy(fnic->iport.fpma, src_mac, 6); ++ } ++ ++ /* ++ * This function will initialize the dst_mac address to be ++ * used in outgoing frames ++ */ ++ static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic, ++ uint8_t *dst_mac) ++ { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], ++ dst_mac[4], dst_mac[5]); ++ ++ memcpy(fnic->iport.fcfmac, dst_mac, 6); ++ } ++ ++ void fnic_fdls_link_status_change(struct fnic *fnic, int linkup) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "link up: %d, usefip: %d", linkup, iport->usefip); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ if (linkup) { ++ if (iport->usefip) { ++ iport->state = FNIC_IPORT_STATE_FIP; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "link up: %d, usefip: %d", linkup, iport->usefip); ++ fnic_fcoe_send_vlan_req(fnic); ++ } else { ++ iport->state = FNIC_IPORT_STATE_FABRIC_DISC; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport->state: %d", iport->state); ++ fnic_fdls_disc_start(iport); ++ } ++ } else { ++ iport->state = FNIC_IPORT_STATE_LINK_WAIT; ++ if (!is_zero_ether_addr(iport->fpma)) ++ vnic_dev_del_addr(fnic->vdev, iport->fpma); ++ fnic_common_fip_cleanup(fnic); ++ fnic_fdls_link_down(iport); ++ ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ } ++ ++ ++ /* ++ * FPMA can be either taken from ethhdr(dst_mac) or flogi resp ++ * or derive from FC_MAP and FCID combination. While it should be ++ * same, revisit this if there is any possibility of not-correct. 
++ */ ++ void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, ++ uint8_t *fcid) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct ethhdr *ethhdr = (struct ethhdr *) rx_frame; ++ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 }; ++ ++ memcpy(&fcmac[3], fcid, 3); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ ethhdr->h_dest[0], ethhdr->h_dest[1], ++ ethhdr->h_dest[2], ethhdr->h_dest[3], ++ ethhdr->h_dest[4], ethhdr->h_dest[5]); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x", ++ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], ++ fcmac[5]); ++ ++ fnic_fdls_set_fcoe_srcmac(fnic, fcmac); ++ fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source); ++ } ++ ++ void fnic_fdls_init(struct fnic *fnic, int usefip) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ ++ /* Initialize iPort structure */ ++ iport->state = FNIC_IPORT_STATE_INIT; ++ iport->fnic = fnic; ++ iport->usefip = usefip; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x", ++ iport->hwmac[0], iport->hwmac[1], iport->hwmac[2], ++ iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]); ++ ++ INIT_LIST_HEAD(&iport->tport_list); ++ INIT_LIST_HEAD(&iport->tport_list_pending_del); ++ ++ fnic_fdls_disc_init(iport); ++ } +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + + void fnic_handle_link(struct work_struct *work) + { +@@@ -68,124 -183,86 +206,172 @@@ + fnic->link_status = vnic_dev_link_status(fnic->vdev); + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + +++<<<<<<< HEAD + + new_port_speed = vnic_dev_port_speed(fnic->vdev); + + atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, + + new_port_speed); + + if (old_port_speed != new_port_speed) + + FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, + + "Current vnic speed set to : %llu\n", + + new_port_speed); +- +- switch (vnic_dev_port_speed(fnic->vdev)) { +- case DCEM_PORTSPEED_10G: +- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; +- fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; +- break; +- case DCEM_PORTSPEED_20G: +- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; +- fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; +- break; +- case DCEM_PORTSPEED_25G: +- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; +- fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; +- break; +- case DCEM_PORTSPEED_40G: +- case DCEM_PORTSPEED_4x10G: +- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; +- fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; +- break; +- case DCEM_PORTSPEED_100G: +- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; +- fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; +- break; +- default: +- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; +- fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; +- break; +++======= ++ while (fnic->reset_in_progress == IN_PROGRESS) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic reset in progress. 
Link event needs to wait\n"); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "waiting for reset completion\n"); ++ wait_for_completion_timeout(&fnic->reset_completion_wait, ++ msecs_to_jiffies(5000)); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "woken up from reset completion wait\n"); ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ max_count++; ++ if (max_count >= MAX_RESET_WAIT_COUNT) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Rstth waited for too long. Skipping handle link event\n"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ return; ++ } ++ } ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Marking fnic reset in progress\n"); ++ fnic->reset_in_progress = IN_PROGRESS; ++ ++ if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) || ++ (fnic->link_status != old_link_status)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "old link status: %d link status: %d\n", ++ old_link_status, (int) fnic->link_status); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "old down count %d down count: %d\n", ++ old_link_down_cnt, (int) fnic->link_down_cnt); + } + + if (old_link_status == fnic->link_status) { + if (!fnic->link_status) { + /* DOWN -> DOWN */ +++<<<<<<< HEAD + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data(fnic->lport->host->host_no, + + FNIC_FC_LE, "Link Status: DOWN->DOWN", + + strlen("Link Status: DOWN->DOWN")); + + } else { + + if (old_link_down_cnt != fnic->link_down_cnt) { + + /* UP -> DOWN -> UP */ + + fnic->lport->host_stats.link_failure_count++; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, + + FNIC_FC_LE, + + "Link Status:UP_DOWN_UP", + + strlen("Link_Status:UP_DOWN_UP") + + ); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "link down\n"); + + fcoe_ctlr_link_down(&fnic->ctlr); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + /* start FCoE VLAN discovery */ + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, + + FNIC_FC_LE, + + "Link Status: UP_DOWN_UP_VLAN", + + strlen( + + "Link Status: UP_DOWN_UP_VLAN") + + ); + + fnic_fcoe_send_vlan_req(fnic); + + return; + + } + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "link up\n"); + + fcoe_ctlr_link_up(&fnic->ctlr); + + } else { + + /* UP -> UP */ + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: UP_UP", + + strlen("Link Status: UP_UP")); +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "down->down\n"); ++ } else { ++ if (old_link_down_cnt != fnic->link_down_cnt) { ++ /* UP -> DOWN -> UP */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "up->down. Link down\n"); ++ fnic_fdls_link_status_change(fnic, 0); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "down->up. 
Link up\n"); ++ fnic_fdls_link_status_change(fnic, 1); ++ } else { ++ /* UP -> UP */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "up->up\n"); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + } + } + } else if (fnic->link_status) { + /* DOWN -> UP */ +++<<<<<<< HEAD + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + /* start FCoE VLAN discovery */ + + fnic_fc_trace_set_data(fnic->lport->host->host_no, + + FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + + strlen("Link Status: DOWN_UP_VLAN")); + + fnic_fcoe_send_vlan_req(fnic); + + + + return; + + } + + + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); + + fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); + + fcoe_ctlr_link_up(&fnic->ctlr); + + } else { + + /* UP -> DOWN */ + + fnic->lport->host_stats.link_failure_count++; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); + + fnic_fc_trace_set_data( + + fnic->lport->host->host_no, FNIC_FC_LE, + + "Link Status: UP_DOWN", + + strlen("Link Status: UP_DOWN")); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "deleting fip-timer during link-down\n"); + + del_timer_sync(&fnic->fip_timer); + + } + + fcoe_ctlr_link_down(&fnic->ctlr); +++======= ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "down->up. Link up\n"); ++ fnic_fdls_link_status_change(fnic, 1); ++ } else { ++ /* UP -> DOWN */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "up->down. Link down\n"); ++ fnic_fdls_link_status_change(fnic, 0); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + } + ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ fnic->reset_in_progress = NOT_IN_PROGRESS; ++ complete(&fnic->reset_completion_wait); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Marking fnic reset completion\n"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + } + + /* +@@@ -214,56 -289,47 +398,67 @@@ void fnic_handle_frame(struct work_stru + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && +- fnic->state != FNIC_IN_ETH_MODE) { +- skb_queue_head(&fnic->frame_queue, skb); +- spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ fnic->state != FNIC_IN_ETH_MODE) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Cannot process frame in transitional state\n"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } +- spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +- fc_exch_recv(lp, fp); ++ list_del(&cur_frame->links); ++ ++ /* Frames from FCP_RQ will have ethhdrs stripped off */ ++ fchdr_offset = (cur_frame->rx_ethhdr_stripped) ? 
++ 0 : FNIC_ETH_FCOE_HDRS_OFFSET; ++ ++ fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp, ++ cur_frame->frame_len, fchdr_offset); ++ ++ kfree(cur_frame->fp); ++ mempool_free(cur_frame, fnic->frame_elem_pool); + } ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + } + + -void fnic_handle_fip_frame(struct work_struct *work) + +void fnic_fcoe_evlist_free(struct fnic *fnic) + { + - struct fnic_frame_list *cur_frame, *next; + - struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + + struct fnic_event *fevt = NULL; + + struct fnic_event *next = NULL; + + unsigned long flags; + + - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + - "Processing FIP frame\n"); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (list_empty(&fnic->evlist)) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + - spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + - list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue, + - links) { + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + + list_del(&fevt->list); + + kfree(fevt); + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +} + + + +void fnic_handle_event(struct work_struct *work) + +{ + + struct fnic *fnic = container_of(work, struct fnic, event_work); + + struct fnic_event *fevt = NULL; + + struct fnic_event *next = NULL; + + unsigned long flags; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (list_empty(&fnic->evlist)) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + if (fnic->stop_rx_link_events) { + - list_del(&cur_frame->links); + - spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + - kfree(cur_frame->fp); + - kfree(cur_frame); + + list_del(&fevt->list); + + kfree(fevt); + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + - + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. 
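The two hunks above capture the core data-structure change of the FDLS rework: receive frames move from sk_buff queues to a plain linked list of frame elements that fnic_handle_frame() drains under fnic_lock, freeing each element back to a mempool after handing the frame to fnic_fdls_recv_frame(). A compilable userspace sketch of that consumer shape, with a pthread mutex and malloc/free standing in for fnic_lock and the mempool (all names here are illustrative, not the driver's API):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for struct fnic_frame_list: one queued receive frame. */
	struct frame_elem {
		struct frame_elem *next;
		int frame_len;
	};

	static pthread_mutex_t frame_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct frame_elem *frame_queue;	/* fnic->frame_queue */

	/* Producer (RQ completion path): allocate an element, queue it. */
	static int queue_frame(int len)
	{
		struct frame_elem *e = calloc(1, sizeof(*e));

		if (!e)
			return -1;	/* the driver drops the frame instead */
		e->frame_len = len;
		pthread_mutex_lock(&frame_lock);
		e->next = frame_queue;
		frame_queue = e;
		pthread_mutex_unlock(&frame_lock);
		return 0;
	}

	/* Consumer (the fnic_handle_frame() work item): drain under the lock. */
	static void handle_frames(void)
	{
		pthread_mutex_lock(&frame_lock);
		while (frame_queue) {
			struct frame_elem *e = frame_queue;

			frame_queue = e->next;		/* list_del()     */
			printf("deliver frame, len %d\n", e->frame_len);
			free(e);			/* mempool_free() */
		}
		pthread_mutex_unlock(&frame_lock);
	}

	int main(void)
	{
		queue_frame(64);
		queue_frame(2048);
		handle_frames();
		return 0;
	}

Keeping the whole drain inside the lock mirrors the hunk above, where the list walk, the per-frame delivery, and the mempool_free() all happen before fnic_lock is released.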
+@@@ -910,21 -499,34 +1105,38 @@@ static void fnic_rq_cmpl_frame_recv(str + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + - "fnic->stop_rx_link_events: %d\n", + - fnic->stop_rx_link_events); + goto drop; + } + - + + fr_dev(fp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++<<<<<<< HEAD + + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, + + (char *)skb->data, skb->len)) != 0) { + + printk(KERN_ERR "fnic ctlr frame trace error!!!"); +++======= ++ ++ frame_elem = mempool_alloc(fnic->frame_elem_pool, ++ GFP_ATOMIC | __GFP_ZERO); ++ if (!frame_elem) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate memory for frame elem"); ++ goto drop; +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + } + - frame_elem->fp = fp; + - frame_elem->rx_ethhdr_stripped = ethhdr_stripped; + - frame_elem->frame_len = bytes_written; + +++<<<<<<< HEAD + + skb_queue_tail(&fnic->frame_queue, skb); +++======= ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ list_add_tail(&frame_elem->links, &fnic->frame_queue); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + queue_work(fnic_event_queue, &fnic->frame_work); + - return; + + + return; + drop: + - kfree(fp); + + dev_kfree_skb_irq(skb); + } + + static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, +@@@ -971,35 -573,32 +1183,40 @@@ int fnic_rq_cmpl_handler(struct fnic *f + int fnic_alloc_rq_frame(struct vnic_rq *rq) + { + struct fnic *fnic = vnic_dev_priv(rq->vdev); +- struct sk_buff *skb; ++ void *buf; + u16 len; + dma_addr_t pa; +- int r; ++ int ret; + +++<<<<<<< HEAD + + len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; + + skb = dev_alloc_skb(len); + + if (!skb) { + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "Unable to allocate RQ sk_buff\n"); +++======= ++ len = FNIC_FRAME_HT_ROOM; ++ buf = kmalloc(len, GFP_ATOMIC); ++ if (!buf) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Unable to allocate RQ buffer of size: %d\n", len); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + return -ENOMEM; + } +- skb_reset_mac_header(skb); +- skb_reset_transport_header(skb); +- skb_reset_network_header(skb); +- skb_put(skb, len); +- pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); ++ ++ pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, pa)) { +- r = -ENOMEM; +- printk(KERN_ERR "PCI mapping failed with error %d\n", r); +- goto free_skb; ++ ret = -ENOMEM; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "PCI mapping failed with error %d\n", ret); ++ goto free_buf; + } + +- fnic_queue_rq_desc(rq, skb, pa, len); ++ fnic_queue_rq_desc(rq, buf, pa, len); + return 0; +- +- free_skb: +- kfree_skb(skb); +- return r; ++ free_buf: ++ kfree(buf); ++ return ret; + } + + void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) +@@@ -1303,109 -890,173 +1517,281 @@@ void fnic_free_wq_buf(struct vnic_wq *w + buf->os_buf = NULL; + } + +++<<<<<<< HEAD + +void fnic_fcoe_reset_vlans(struct fnic *fnic) + +{ + + unsigned long flags; + + struct fcoe_vlan *vlan; + + struct fcoe_vlan *next; + + + + /* + + * indicate a link down to fcoe so that all fcf's are free'd + + * might not be required 
since we did this before sending vlan + + * discovery request + + */ + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (!list_empty(&fnic->vlans)) { + + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + + list_del(&vlan->list); + + kfree(vlan); + + } + + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + +} + + + +void fnic_handle_fip_timer(struct fnic *fnic) + +{ + + unsigned long flags; + + struct fcoe_vlan *vlan; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + u64 sol_time; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->stop_rx_link_events) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + if (fnic->ctlr.mode == FIP_MODE_NON_FIP) + + return; + + + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (list_empty(&fnic->vlans)) { + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* no vlans available, try again */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fip_timer: vlan %d state %d sol_count %d\n", + + vlan->vid, vlan->state, vlan->sol_count); + + switch (vlan->state) { + + case FIP_VLAN_USED: + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "FIP VLAN is selected for FC transaction\n"); + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + break; + + case FIP_VLAN_FAILED: + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* if all vlans are in failed state, restart vlan disc */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + break; + + case FIP_VLAN_SENT: + + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + + /* + + * no response on this vlan, remove from the list. 
+ + * Try the next vlan + + */ + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "Dequeue this VLAN ID %d from list\n", + + vlan->vid); + + list_del(&vlan->list); + + kfree(vlan); + + vlan = NULL; + + if (list_empty(&fnic->vlans)) { + + /* we exhausted all vlans, restart vlan disc */ + + spin_unlock_irqrestore(&fnic->vlans_lock, + + flags); + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "fip_timer: vlan list empty, " + + "trigger vlan disc\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + /* check the next vlan */ + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + + list); + + fnic->set_vlan(fnic, vlan->vid); + + vlan->state = FIP_VLAN_SENT; /* sent now */ + + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + + vlan->sol_count++; + + sol_time = jiffies + msecs_to_jiffies + + (FCOE_CTLR_START_DELAY); + + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + + break; + + } +++======= ++ void ++ fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport, ++ unsigned long flags) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct fc_rport *rport; ++ struct fc_rport_identifiers ids; ++ struct rport_dd_data_s *rdd_data; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Adding rport fcid: 0x%x", tport->fcid); ++ ++ ids.node_name = tport->wwnn; ++ ids.port_name = tport->wwpn; ++ ids.port_id = tport->fcid; ++ ids.roles = FC_RPORT_ROLE_FCP_TARGET; ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ rport = fc_remote_port_add(fnic->lport->host, 0, &ids); ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (!rport) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Failed to add rport for tport: 0x%x", tport->fcid); ++ return; ++ } ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Added rport fcid: 0x%x", tport->fcid); ++ ++ /* Mimic these assignments in queuecommand to avoid timing issues */ ++ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; ++ rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; ++ rdd_data = rport->dd_data; ++ rdd_data->tport = tport; ++ rdd_data->iport = iport; ++ tport->rport = rport; ++ tport->flags |= FNIC_FDLS_SCSI_REGISTERED; ++ } ++ ++ void ++ fnic_fdls_remove_tport(struct fnic_iport_s *iport, ++ struct fnic_tport_s *tport, unsigned long flags) ++ { ++ struct fnic *fnic = iport->fnic; ++ struct rport_dd_data_s *rdd_data; ++ ++ struct fc_rport *rport; ++ ++ if (!tport) ++ return; ++ ++ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE); ++ rport = tport->rport; ++ ++ if (rport) { ++ /* tport resource release will be done ++ * after fnic_terminate_rport_io() ++ */ ++ tport->flags |= FNIC_FDLS_TPORT_DELETED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ /* Interface to scsi_fc_transport */ ++ fc_remote_port_delete(rport); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Deregistered and freed tport fcid: 0x%x from scsi transport fc", ++ tport->fcid); ++ ++ /* ++ * the dd_data is allocated by fc transport ++ * of size dd_fcrport_size ++ */ ++ rdd_data = rport->dd_data; ++ rdd_data->tport = NULL; ++ rdd_data->iport = NULL; ++ list_del(&tport->links); ++ kfree(tport); ++ } else { ++ fnic_del_tport_timer_sync(fnic, tport); ++ list_del(&tport->links); ++ kfree(tport); ++ } ++ } ++ ++ void fnic_delete_fcp_tports(struct fnic *fnic) ++ { ++ struct fnic_tport_s *tport, *next; ++ unsigned long flags; ++ 
++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "removing fcp rport fcid: 0x%x", tport->fcid); ++ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); ++ fnic_del_tport_timer_sync(fnic, tport); ++ fnic_fdls_remove_tport(&fnic->iport, tport, flags); ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } ++ ++ /** ++ * fnic_tport_event_handler() - Handler for remote port events ++ * in the tport_event_queue. ++ * ++ * @work: Handle to the remote port being dequeued ++ */ ++ void fnic_tport_event_handler(struct work_struct *work) ++ { ++ struct fnic *fnic = container_of(work, struct fnic, tport_work); ++ struct fnic_tport_event_s *cur_evt, *next; ++ unsigned long flags; ++ struct fnic_tport_s *tport; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { ++ tport = cur_evt->arg1; ++ switch (cur_evt->event) { ++ case TGT_EV_RPORT_ADD: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Add rport event"); ++ if (tport->state == FDLS_TGT_STATE_READY) { ++ fnic_fdls_add_tport(&fnic->iport, ++ (struct fnic_tport_s *) cur_evt->arg1, flags); ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Target not ready. Add rport event dropped: 0x%x", ++ tport->fcid); ++ } ++ break; ++ case TGT_EV_RPORT_DEL: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Remove rport event"); ++ if (tport->state == FDLS_TGT_STATE_OFFLINING) { ++ fnic_fdls_remove_tport(&fnic->iport, ++ (struct fnic_tport_s *) cur_evt->arg1, flags); ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "remove rport event dropped tport fcid: 0x%x", ++ tport->fcid); ++ } ++ break; ++ case TGT_EV_TPORT_DELETE: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Delete tport event"); ++ fdls_delete_tport(tport->iport, tport); ++ break; ++ default: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Unknown tport event"); ++ break; ++ } ++ list_del(&cur_evt->links); ++ kfree(cur_evt); ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } ++ ++ void fnic_flush_tport_event_list(struct fnic *fnic) ++ { ++ struct fnic_tport_event_s *cur_evt, *next; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { ++ list_del(&cur_evt->links); ++ kfree(cur_evt); ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + } +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,943f7d997d10..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -913,15 -936,12 +913,23 @@@ static int fnic_probe(struct pci_dev *p + + INIT_WORK(&fnic->link_work, fnic_handle_link); + INIT_WORK(&fnic->frame_work, fnic_handle_frame); ++ INIT_WORK(&fnic->tport_work, fnic_tport_event_handler); + INIT_WORK(&fnic->flush_work, fnic_flush_tx); +++<<<<<<< HEAD + + skb_queue_head_init(&fnic->frame_queue); + + skb_queue_head_init(&fnic->tx_queue); + + + + /* Enable all queues */ + + for (i = 0; i < fnic->raw_wq_count; i++) + + vnic_wq_enable(&fnic->wq[i]); + + for (i = 0; i < fnic->wq_copy_count; i++) + + vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); +++======= ++ ++ INIT_LIST_HEAD(&fnic->frame_queue); ++ INIT_LIST_HEAD(&fnic->tx_queue); ++ 
INIT_LIST_HEAD(&fnic->tport_event_list); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + + fc_fabric_login(lp); + +@@@ -1004,16 -1035,22 +1012,21 @@@ static void fnic_remove(struct pci_dev + * be no event queued for this fnic device in the workqueue + */ + flush_workqueue(fnic_event_queue); +++<<<<<<< HEAD + + skb_queue_purge(&fnic->frame_queue); + + skb_queue_purge(&fnic->tx_queue); +++======= ++ fnic_free_txq(&fnic->frame_queue); ++ fnic_free_txq(&fnic->tx_queue); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + - del_timer_sync(&fnic->retry_fip_timer); + - del_timer_sync(&fnic->fcs_ka_timer); + - del_timer_sync(&fnic->enode_ka_timer); + - del_timer_sync(&fnic->vn_ka_timer); + - + - fnic_free_txq(&fnic->fip_frame_queue); + + del_timer_sync(&fnic->fip_timer); + + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + + fnic_fcoe_evlist_free(fnic); + } + + - if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) + - del_timer_sync(&fnic->iport.fabric.fdmi_timer); + - + /* + * Log off the fabric. This stops all remote ports, dns port, + * logs off the fabric. This flushes all rport, disc, lport work +@@@ -1036,9 -1073,6 +1049,12 @@@ + */ + fnic_cleanup(fnic); + +++<<<<<<< HEAD + + BUG_ON(!skb_queue_empty(&fnic->frame_queue)); + + BUG_ON(!skb_queue_empty(&fnic->tx_queue)); + + +++======= +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + spin_lock_irqsave(&fnic_list_lock, flags); + list_del(&fnic->list); + spin_unlock_irqrestore(&fnic_list_lock, flags); +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 321954ca143f,74298f9a34e5..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -212,8 -183,8 +212,13 @@@ int fnic_fw_reset_handler(struct fnic * + /* indicate fwreset to io path */ + fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); + +++<<<<<<< HEAD + + skb_queue_purge(&fnic->frame_queue); + + skb_queue_purge(&fnic->tx_queue); +++======= ++ fnic_free_txq(&fnic->frame_queue); ++ fnic_free_txq(&fnic->tx_queue); +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + + /* wait for io cmpl */ + while (atomic_read(&fnic->in_flight)) +@@@ -2478,23 -2575,30 +2483,36 @@@ int fnic_reset(struct Scsi_Host *shost + * host is offlined by SCSI. 
+ * + */ +- int fnic_host_reset(struct scsi_cmnd *sc) ++ int fnic_host_reset(struct Scsi_Host *shost) + { +- int ret; ++ int ret = SUCCESS; + unsigned long wait_host_tmo; +- struct Scsi_Host *shost = sc->device->host; +- struct fc_lport *lp = shost_priv(shost); +- struct fnic *fnic = lport_priv(lp); ++ struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + unsigned long flags; ++ struct fnic_iport_s *iport = &fnic->iport; + + spin_lock_irqsave(&fnic->fnic_lock, flags); +- if (!fnic->internal_reset_inprogress) { +- fnic->internal_reset_inprogress = true; ++ if (fnic->reset_in_progress == NOT_IN_PROGRESS) { ++ fnic->reset_in_progress = IN_PROGRESS; + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "host reset in progress skipping another host reset\n"); + + return SUCCESS; +++======= ++ wait_for_completion_timeout(&fnic->reset_completion_wait, ++ msecs_to_jiffies(10000)); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (fnic->reset_in_progress == IN_PROGRESS) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_WARNING, fnic->lport->host, fnic->fnic_num, ++ "Firmware reset in progress. Skipping another host reset\n"); ++ return SUCCESS; ++ } ++ fnic->reset_in_progress = IN_PROGRESS; +++>>>>>>> 9cf9fe2f3ec5 (scsi: fnic: Add functionality in fnic to support FDLS) + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fnic.h +* Unmerged path drivers/scsi/fnic/fnic_fcs.c +* Unmerged path drivers/scsi/fnic/fnic_main.c +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a35b29bd.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a35b29bd.failed new file mode 100644 index 0000000000000..76ca3f7f376f6 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a35b29bd.failed @@ -0,0 +1,80 @@ +scsi: fnic: Fix crash in fnic_wq_cmpl_handler when FDMI times out + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit a35b29bdedb4d2ae3160d4d6684a6f1ecd9ca7c2 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a35b29bd.failed + +When both the RHBA and RPA FDMI requests time out, fnic reuses a frame to +send ABTS for each of them. On send completion, this causes an attempt to +free the same frame twice that leads to a crash. + +Fix crash by allocating separate frames for RHBA and RPA, and modify ABTS +logic accordingly. + +Tested by checking MDS for FDMI information. 
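The crash mechanism above is a shared-ownership bug: one frame handed to two in-flight ABTS sends means two send completions, each of which frees the frame, so the second free hits a stale pointer. A compilable toy model of the ownership rule the fix enforces, one frame per outstanding request (the names are illustrative, not fnic's interfaces):

	#include <stdlib.h>

	struct frame { unsigned char payload[64]; };

	/* Send completion: the send path owns the frame and frees it
	 * exactly once. Queueing the same frame twice therefore ends
	 * in a double free, which is the bug being fixed. */
	static void abts_send_complete(struct frame *f)
	{
		free(f);
	}

	/* Fixed shape: separate allocations for the RHBA and RPA ABTS,
	 * so each completion frees only the frame it owns. */
	static int send_both_abts(void)
	{
		struct frame *rhba_frame = malloc(sizeof(*rhba_frame));
		struct frame *rpa_frame = malloc(sizeof(*rpa_frame));

		if (!rhba_frame || !rpa_frame) {
			free(rhba_frame);
			free(rpa_frame);
			return -1;
		}
		abts_send_complete(rhba_frame);	/* RHBA ABTS completes */
		abts_send_complete(rpa_frame);	/* RPA ABTS completes  */
		return 0;
	}

	int main(void)
	{
		return send_both_abts();
	}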
+ +Tested by using instrumented driver to: + + - Drop PLOGI response + - Drop RHBA response + - Drop RPA response + - Drop RHBA and RPA response + - Drop PLOGI response + ABTS response + - Drop RHBA response + ABTS response + - Drop RPA response + ABTS response + - Drop RHBA and RPA response + ABTS response for both of them + +Fixes: 09c1e6ab4ab2 ("scsi: fnic: Add and integrate support for FDMI") + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Tested-by: Arun Easi +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi + Tested-by: Karan Tilak Kumar + Cc: stable@vger.kernel.org + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250618003431.6314-1-kartilak@cisco.com + Reviewed-by: John Meneghini + Signed-off-by: Martin K. Petersen +(cherry picked from commit a35b29bdedb4d2ae3160d4d6684a6f1ecd9ca7c2) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fnic.h +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,86e293ce530d..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -39,7 -29,8 +39,11 @@@ + + #define DRV_NAME "fnic" + #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +++<<<<<<< HEAD + +#define DRV_VERSION "1.6.0.55" +++======= ++ #define DRV_VERSION "1.8.0.1" +++>>>>>>> a35b29bdedb4 (scsi: fnic: Fix crash in fnic_wq_cmpl_handler when FDMI times out) + #define PFX DRV_NAME ": " + #define DFX DRV_NAME "%d: " + +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fnic.h +diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h +index 5d78eea20873..4eb1c3e2245f 100644 +--- a/drivers/scsi/fnic/fnic_fdls.h ++++ b/drivers/scsi/fnic/fnic_fdls.h +@@ -392,6 +392,7 @@ void fdls_send_tport_abts(struct fnic_iport_s *iport, + bool fdls_delete_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); + void fdls_fdmi_timer_callback(struct timer_list *t); ++void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport); + + /* fnic_fcs.c */ + void fnic_fdls_init(struct fnic *fnic, int usefip); diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a63e78eb.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a63e78eb.failed new file mode 100644 index 0000000000000..e9f9b03e82d73 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a63e78eb.failed @@ -0,0 +1,2755 @@ +scsi: fnic: Add support for fabric based solicited requests and responses + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit a63e78eb2b0f654b138abfc323f6bd7573e26145 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a63e78eb.failed + +Add fdls_disc.c to support fabric based solicited requests and responses. + +Clean up obsolete code but keep the function template so as to not break +compilation. + +Remove duplicate definitions from header files. + +Modify definitions of data members. 
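A central building block of the new fdls_disc.c (visible in the diff below) is a bitmap-backed OX_ID pool: fdls_alloc_oxid() takes the next zero bit starting from a rotor index, sets it, and advances the rotor so exchange IDs cycle rather than being reused immediately, with a failed search yielding FNIC_UNASSIGNED_OXID. A self-contained model of that allocator follows; it uses a 64-slot pool and wraps the search, whereas the driver's find_next_zero_bit() call scans only from the rotor forward, and the frame-type encoding (FNIC_OXID_ENCODE) is left out:

	#include <stdint.h>
	#include <stdio.h>

	#define POOL_SZ 64U			/* stands in for FNIC_OXID_POOL_SZ */

	struct oxid_pool {
		uint64_t bitmap;		/* one bit per active exchange */
		unsigned int next_idx;		/* rotor for the next search   */
	};

	/* Next free slot at or after the rotor, wrapping once. */
	static int oxid_alloc(struct oxid_pool *p)
	{
		for (unsigned int n = 0; n < POOL_SZ; n++) {
			unsigned int idx = (p->next_idx + n) % POOL_SZ;

			if (!(p->bitmap & (1ULL << idx))) {
				p->bitmap |= 1ULL << idx;
				p->next_idx = (idx + 1) % POOL_SZ;
				return (int)idx;
			}
		}
		return -1;			/* all slots busy */
	}

	static void oxid_free(struct oxid_pool *p, unsigned int idx)
	{
		p->bitmap &= ~(1ULL << idx);
	}

	int main(void)
	{
		struct oxid_pool pool = { 0, 0 };
		int a = oxid_alloc(&pool);	/* 0 */
		int b = oxid_alloc(&pool);	/* 1 */

		oxid_free(&pool, (unsigned int)a);
		/* Rotor is at 2, so slot 0 is not handed out again yet. */
		printf("a=%d b=%d next=%d\n", a, b, oxid_alloc(&pool));
		return 0;
	}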
+ + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202406112309.8GiDUvIM-lkp@intel.com/ +Closes: https://lore.kernel.org/oe-kbuild-all/202406120201.VakI9Dly-lkp@intel.com/ +Closes: https://lore.kernel.org/oe-kbuild-all/202412080837.2JU0r2Ny-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela +Co-developed-by: Gian Carlo Boffa + Signed-off-by: Gian Carlo Boffa +Co-developed-by: Arulprabhu Ponnusamy + Signed-off-by: Arulprabhu Ponnusamy +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-4-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit a63e78eb2b0f654b138abfc323f6bd7573e26145) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_fcs.c +diff --cc drivers/scsi/fnic/fnic_fcs.c +index 8c3b350695e3,70a5cbf6035e..000000000000 +--- a/drivers/scsi/fnic/fnic_fcs.c ++++ b/drivers/scsi/fnic/fnic_fcs.c +@@@ -1324,88 -1485,3 +1490,91 @@@ void fnic_fcoe_reset_vlans(struct fnic + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + } + +++<<<<<<< HEAD + +void fnic_handle_fip_timer(struct fnic *fnic) + +{ + + unsigned long flags; + + struct fcoe_vlan *vlan; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + u64 sol_time; + + + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->stop_rx_link_events) { + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + return; + + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + + + if (fnic->ctlr.mode == FIP_MODE_NON_FIP) + + return; + + + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + if (list_empty(&fnic->vlans)) { + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* no vlans available, try again */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "fip_timer: vlan %d state %d sol_count %d\n", + + vlan->vid, vlan->state, vlan->sol_count); + + switch (vlan->state) { + + case FIP_VLAN_USED: + + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + + "FIP VLAN is selected for FC transaction\n"); + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + break; + + case FIP_VLAN_FAILED: + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* if all vlans are in failed state, restart vlan disc */ + + if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) + + if (printk_ratelimit()) + + shost_printk(KERN_DEBUG, fnic->lport->host, + + "Start VLAN Discovery\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + break; + + case FIP_VLAN_SENT: + + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + + /* + + * no response on this vlan, remove from the list. 
+ + * Try the next vlan + + */ + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "Dequeue this VLAN ID %d from list\n", + + vlan->vid); + + list_del(&vlan->list); + + kfree(vlan); + + vlan = NULL; + + if (list_empty(&fnic->vlans)) { + + /* we exhausted all vlans, restart vlan disc */ + + spin_unlock_irqrestore(&fnic->vlans_lock, + + flags); + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + + "fip_timer: vlan list empty, " + + "trigger vlan disc\n"); + + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + + return; + + } + + /* check the next vlan */ + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + + list); + + fnic->set_vlan(fnic, vlan->vid); + + vlan->state = FIP_VLAN_SENT; /* sent now */ + + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + + vlan->sol_count++; + + sol_time = jiffies + msecs_to_jiffies + + (FCOE_CTLR_START_DELAY); + + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + + break; + + } + +} +++======= +++>>>>>>> a63e78eb2b0f (scsi: fnic: Add support for fabric based solicited requests and responses) +diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile +index 6214a6b2e96d..3bd6b1c8b643 100644 +--- a/drivers/scsi/fnic/Makefile ++++ b/drivers/scsi/fnic/Makefile +@@ -7,6 +7,7 @@ fnic-y := \ + fnic_main.o \ + fnic_res.o \ + fnic_fcs.o \ ++ fdls_disc.o \ + fnic_scsi.o \ + fnic_trace.o \ + fnic_debugfs.o \ +diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c +new file mode 100644 +index 000000000000..127607927ce1 +--- /dev/null ++++ b/drivers/scsi/fnic/fdls_disc.c +@@ -0,0 +1,2180 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright 2008 Cisco Systems, Inc. All rights reserved. ++ * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
++ */ ++ ++#include ++#include "fnic.h" ++#include "fdls_fc.h" ++#include "fnic_fdls.h" ++#include ++#include ++#include ++ ++#define FC_FC4_TYPE_SCSI 0x08 ++#define PORT_SPEED_BIT_8 8 ++#define PORT_SPEED_BIT_9 9 ++#define PORT_SPEED_BIT_14 14 ++#define PORT_SPEED_BIT_15 15 ++ ++#define RETRIES_EXHAUSTED(iport) \ ++ (iport->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY) ++ ++#define FNIC_TPORT_MAX_NEXUS_RESTART (8) ++ ++#define SCHEDULE_OXID_FREE_RETRY_TIME (300) ++ ++/* Private Functions */ ++static void fdls_send_rpn_id(struct fnic_iport_s *iport); ++static void fdls_process_flogi_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr, ++ void *rx_frame); ++static void fnic_fdls_start_plogi(struct fnic_iport_s *iport); ++static void fnic_fdls_start_flogi(struct fnic_iport_s *iport); ++static void fdls_start_fabric_timer(struct fnic_iport_s *iport, ++ int timeout); ++static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport); ++static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport); ++static void fdls_init_fabric_abts_frame(uint8_t *frame, ++ struct fnic_iport_s *iport); ++ ++uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport) ++{ ++ struct fnic *fnic = iport->fnic; ++ uint8_t *frame = NULL; ++ ++ frame = mempool_alloc(fnic->frame_pool, GFP_ATOMIC); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame"); ++ return NULL; ++ } ++ ++ memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ); ++ return frame; ++} ++ ++/** ++ * fdls_alloc_oxid - Allocate an oxid from the bitmap based oxid pool ++ * @iport: Handle to iport instance ++ * @oxid_frame_type: Type of frame to allocate ++ * @active_oxid: the oxid which is in use ++ * ++ * Called with fnic lock held ++ */ ++uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type, ++ uint16_t *active_oxid) ++{ ++ struct fnic *fnic = iport->fnic; ++ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; ++ int idx; ++ uint16_t oxid; ++ ++ lockdep_assert_held(&fnic->fnic_lock); ++ ++ /* ++ * Allocate next available oxid from bitmap ++ */ ++ idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ, oxid_pool->next_idx); ++ if (idx == FNIC_OXID_POOL_SZ) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Alloc oxid: all oxid slots are busy iport state:%d\n", ++ iport->state); ++ return FNIC_UNASSIGNED_OXID; ++ } ++ ++ WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap)); ++ oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */ ++ ++ oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type); ++ *active_oxid = oxid; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "alloc oxid: 0x%x, iport state: %d\n", ++ oxid, iport->state); ++ return oxid; ++} ++ ++/** ++ * fdls_free_oxid_idx - Free the oxid using the idx ++ * @iport: Handle to iport instance ++ * @oxid_idx: The index to free ++ * ++ * Free the oxid immediately and make it available for new requests ++ * Called with fnic lock held ++ */ ++static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx) ++{ ++ struct fnic *fnic = iport->fnic; ++ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; ++ ++ lockdep_assert_held(&fnic->fnic_lock); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "free oxid idx: 0x%x\n", oxid_idx); ++ ++ WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap)); ++} ++ ++/** ++ * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work ++ * @work: Handle to work_struct 
++ * ++ * Scheduled when an oxid is to be freed later ++ * After freeing expired oxid(s), the handler schedules ++ * another callback with the remaining time ++ * of next unexpired entry in the reclaim list. ++ */ ++void fdls_reclaim_oxid_handler(struct work_struct *work) ++{ ++ struct fnic_oxid_pool_s *oxid_pool = container_of(work, ++ struct fnic_oxid_pool_s, oxid_reclaim_work.work); ++ struct fnic_iport_s *iport = container_of(oxid_pool, ++ struct fnic_iport_s, oxid_pool); ++ struct fnic *fnic = iport->fnic; ++ struct reclaim_entry_s *reclaim_entry, *next; ++ unsigned long delay_j, cur_jiffies; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Reclaim oxid callback\n"); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ /* Though the work was scheduled for one entry, ++ * walk through and free the expired entries which might have been scheduled ++ * at around the same time as the first entry ++ */ ++ list_for_each_entry_safe(reclaim_entry, next, ++ &(oxid_pool->oxid_reclaim_list), links) { ++ ++ /* The list is always maintained in the order of expiry time */ ++ cur_jiffies = jiffies; ++ if (time_before(cur_jiffies, reclaim_entry->expires)) ++ break; ++ ++ list_del(&reclaim_entry->links); ++ fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx); ++ kfree(reclaim_entry); ++ } ++ ++ /* schedule to free up the next entry */ ++ if (!list_empty(&oxid_pool->oxid_reclaim_list)) { ++ reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list, ++ struct reclaim_entry_s, links); ++ ++ delay_j = reclaim_entry->expires - cur_jiffies; ++ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Scheduling next callback at:%ld jiffies\n", delay_j); ++ } ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++} ++ ++/** ++ * fdls_free_oxid - Helper function to free the oxid ++ * @iport: Handle to iport instance ++ * @oxid: oxid to free ++ * @active_oxid: the oxid which is in use ++ * ++ * Called with fnic lock held ++ */ ++void fdls_free_oxid(struct fnic_iport_s *iport, ++ uint16_t oxid, uint16_t *active_oxid) ++{ ++ fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid)); ++ *active_oxid = FNIC_UNASSIGNED_OXID; ++} ++ ++/** ++ * fdls_schedule_oxid_free - Schedule oxid to be freed later ++ * @iport: Handle to iport instance ++ * @active_oxid: the oxid which is in use ++ * ++ * Gets called in a rare case scenario when both a command ++ * (fdls or target discovery) timed out and the following ABTS ++ * timed out as well, without a link change. ++ * ++ * Called with fnic lock held ++ */ ++void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid) ++{ ++ struct fnic *fnic = iport->fnic; ++ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; ++ struct reclaim_entry_s *reclaim_entry; ++ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); ++ int oxid_idx = FNIC_OXID_IDX(*active_oxid); ++ ++ lockdep_assert_held(&fnic->fnic_lock); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Schedule oxid free. 
oxid: 0x%x\n", *active_oxid); ++ ++ *active_oxid = FNIC_UNASSIGNED_OXID; ++ ++ reclaim_entry = (struct reclaim_entry_s *) ++ kzalloc(sizeof(struct reclaim_entry_s), GFP_ATOMIC); ++ ++ if (!reclaim_entry) { ++ FNIC_FCS_DBG(KERN_WARNING, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate memory for reclaim struct for oxid idx: %d\n", ++ oxid_idx); ++ ++ /* Retry the scheduling */ ++ WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free)); ++ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0); ++ return; ++ } ++ ++ reclaim_entry->oxid_idx = oxid_idx; ++ reclaim_entry->expires = round_jiffies(jiffies + delay_j); ++ ++ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); ++ ++ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); ++} ++ ++/** ++ * fdls_schedule_oxid_free_retry_work - Thread to schedule the ++ * oxid to be freed later ++ * ++ * @work: Handle to the work struct ++ */ ++void fdls_schedule_oxid_free_retry_work(struct work_struct *work) ++{ ++ struct fnic_oxid_pool_s *oxid_pool = container_of(work, ++ struct fnic_oxid_pool_s, schedule_oxid_free_retry.work); ++ struct fnic_iport_s *iport = container_of(oxid_pool, ++ struct fnic_iport_s, oxid_pool); ++ struct fnic *fnic = iport->fnic; ++ struct reclaim_entry_s *reclaim_entry; ++ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); ++ int idx; ++ ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) { ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Schedule oxid free. oxid idx: %d\n", idx); ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ reclaim_entry = (struct reclaim_entry_s *) ++ kzalloc(sizeof(struct reclaim_entry_s), GFP_KERNEL); ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ ++ if (!reclaim_entry) { ++ FNIC_FCS_DBG(KERN_WARNING, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate memory for reclaim struct for oxid idx: 0x%x\n", ++ idx); ++ ++ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, ++ msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME)); ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ return; ++ } ++ ++ if (test_and_clear_bit(idx, oxid_pool->pending_schedule_free)) { ++ reclaim_entry->oxid_idx = idx; ++ reclaim_entry->expires = round_jiffies(jiffies + delay_j); ++ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); ++ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); ++ } else { ++ /* unlikely scenario, free the allocated memory and continue */ ++ kfree(reclaim_entry); ++ } ++} ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++} ++ ++static bool fdls_is_oxid_fabric_req(uint16_t oxid) ++{ ++ int oxid_frame_type = FNIC_FRAME_TYPE(oxid); ++ ++ switch (oxid_frame_type) { ++ case FNIC_FRAME_TYPE_FABRIC_FLOGI: ++ case FNIC_FRAME_TYPE_FABRIC_PLOGI: ++ case FNIC_FRAME_TYPE_FABRIC_RPN: ++ case FNIC_FRAME_TYPE_FABRIC_RFT: ++ case FNIC_FRAME_TYPE_FABRIC_RFF: ++ case FNIC_FRAME_TYPE_FABRIC_GPN_FT: ++ case FNIC_FRAME_TYPE_FABRIC_LOGO: ++ break; ++ default: ++ return false; ++ } ++ return true; ++} ++ ++static bool fdls_is_oxid_fdmi_req(uint16_t oxid) ++{ ++ int oxid_frame_type = FNIC_FRAME_TYPE(oxid); ++ ++ switch (oxid_frame_type) { ++ case FNIC_FRAME_TYPE_FDMI_PLOGI: ++ case FNIC_FRAME_TYPE_FDMI_RHBA: ++ case FNIC_FRAME_TYPE_FDMI_RPA: ++ break; ++ default: ++ return false; ++ } ++ return true; ++} ++ ++static bool fdls_is_oxid_tgt_req(uint16_t 
oxid) ++{ ++ int oxid_frame_type = FNIC_FRAME_TYPE(oxid); ++ ++ switch (oxid_frame_type) { ++ case FNIC_FRAME_TYPE_TGT_PLOGI: ++ case FNIC_FRAME_TYPE_TGT_PRLI: ++ case FNIC_FRAME_TYPE_TGT_ADISC: ++ case FNIC_FRAME_TYPE_TGT_LOGO: ++ break; ++ default: ++ return false; ++ } ++ return true; ++} ++ ++void fnic_del_fabric_timer_sync(struct fnic *fnic) ++{ ++ fnic->iport.fabric.del_timer_inprogress = 1; ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ del_timer_sync(&fnic->iport.fabric.retry_timer); ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ fnic->iport.fabric.del_timer_inprogress = 0; ++} ++ ++void fnic_del_tport_timer_sync(struct fnic *fnic, ++ struct fnic_tport_s *tport) ++{ ++ tport->del_timer_inprogress = 1; ++ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); ++ del_timer_sync(&tport->retry_timer); ++ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); ++ tport->del_timer_inprogress = 0; ++} ++ ++static void ++fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout) ++{ ++ u64 fabric_tov; ++ struct fnic *fnic = iport->fnic; ++ ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport fcid: 0x%x: Canceling fabric disc timer\n", ++ iport->fcid); ++ fnic_del_fabric_timer_sync(fnic); ++ iport->fabric.timer_pending = 0; ++ } ++ ++ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) ++ iport->fabric.retry_counter++; ++ ++ fabric_tov = jiffies + msecs_to_jiffies(timeout); ++ mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov)); ++ iport->fabric.timer_pending = 1; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fabric timer is %d ", timeout); ++} ++ ++void fdls_init_plogi_frame(uint8_t *frame, ++ struct fnic_iport_s *iport) ++{ ++ struct fc_std_flogi *pplogi; ++ uint8_t s_id[3]; ++ ++ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *pplogi = (struct fc_std_flogi) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC}, ++ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .els = { ++ .fl_cmd = ELS_PLOGI, ++ .fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, ++ .sp_lo_ver = FNIC_FC_PH_VER_LO, ++ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), ++ .sp_features = cpu_to_be16(FC_SP_FT_CIRO), ++ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ), ++ .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS), ++ .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO), ++ .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)}, ++ .fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ), ++ .fl_cssp[2].cp_rdfs = cpu_to_be16(0x800), ++ .fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF), ++ .fl_cssp[2].cp_open_seq = 1} ++ }; ++ ++ FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn); ++ FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn); ++ FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size); ++ ++ hton24(s_id, iport->fcid); ++ FNIC_STD_SET_S_ID(pplogi->fchdr, s_id); ++} ++ ++static void fdls_init_logo_frame(uint8_t *frame, ++ struct fnic_iport_s *iport) ++{ ++ struct fc_std_logo *plogo; ++ uint8_t s_id[3]; ++ ++ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *plogo = (struct fc_std_logo) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS, ++ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}}, ++ .els.fl_cmd = ELS_LOGO, ++ }; ++ ++ hton24(s_id, iport->fcid); ++ FNIC_STD_SET_S_ID(plogo->fchdr, s_id); ++ memcpy(plogo->els.fl_n_port_id, s_id, 3); ++ ++ 
FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn, ++ iport->wwpn); ++} ++ ++static void fdls_init_fabric_abts_frame(uint8_t *frame, ++ struct fnic_iport_s *iport) ++{ ++ struct fc_frame_header *pfabric_abts; ++ ++ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *pfabric_abts = (struct fc_frame_header) { ++ .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */ ++ .fh_s_id = {0x00, 0x00, 0x00}, ++ .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS, ++ .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00, ++ .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID), ++ .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort a exchange */ ++ }; ++} ++static void fdls_send_fabric_abts(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ uint8_t s_id[3]; ++ uint8_t d_id[3]; ++ struct fnic *fnic = iport->fnic; ++ struct fc_frame_header *pfabric_abts; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_frame_header); ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send fabric ABTS"); ++ return; ++ } ++ ++ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ fdls_init_fabric_abts_frame(frame, iport); ++ ++ hton24(s_id, iport->fcid); ++ ++ switch (iport->fabric.state) { ++ case FDLS_STATE_FABRIC_LOGO: ++ hton24(d_id, FC_FID_FLOGI); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ ++ case FDLS_STATE_FABRIC_FLOGI: ++ hton24(d_id, FC_FID_FLOGI); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ ++ case FDLS_STATE_FABRIC_PLOGI: ++ FNIC_STD_SET_S_ID(*pfabric_abts, s_id); ++ hton24(d_id, FC_FID_DIR_SERV); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ ++ case FDLS_STATE_RPN_ID: ++ FNIC_STD_SET_S_ID(*pfabric_abts, s_id); ++ hton24(d_id, FC_FID_DIR_SERV); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ ++ case FDLS_STATE_SCR: ++ FNIC_STD_SET_S_ID(*pfabric_abts, s_id); ++ hton24(d_id, FC_FID_FCTRL); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ ++ case FDLS_STATE_REGISTER_FC4_TYPES: ++ FNIC_STD_SET_S_ID(*pfabric_abts, s_id); ++ hton24(d_id, FC_FID_DIR_SERV); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ ++ case FDLS_STATE_REGISTER_FC4_FEATURES: ++ FNIC_STD_SET_S_ID(*pfabric_abts, s_id); ++ hton24(d_id, FC_FID_DIR_SERV); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ ++ case FDLS_STATE_GPN_FT: ++ FNIC_STD_SET_S_ID(*pfabric_abts, s_id); ++ hton24(d_id, FC_FID_DIR_SERV); ++ FNIC_STD_SET_D_ID(*pfabric_abts, d_id); ++ break; ++ default: ++ return; ++ } ++ ++ oxid = iport->active_oxid_fabric_req; ++ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send fabric abts. 
iport->fabric.state: %d oxid: 0x%x", ++ iport->fcid, iport->fabric.state, oxid); ++ ++ iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED; ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++ iport->fabric.timer_pending = 1; ++} ++ ++static void fdls_send_fabric_flogi(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ struct fc_std_flogi *pflogi; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_flogi); ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send FLOGI"); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ ++ pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *pflogi = (struct fc_std_flogi) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE}, ++ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .els.fl_cmd = ELS_FLOGI, ++ .els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, ++ .sp_lo_ver = FNIC_FC_PH_VER_LO, ++ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), ++ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)}, ++ .els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) ++ }; ++ ++ FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn); ++ FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn); ++ FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size); ++ FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov); ++ FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, ++ &iport->active_oxid_fabric_req); ++ ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send FLOGI", ++ iport->fcid); ++ mempool_free(frame, fnic->frame_pool); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid, ++ oxid); ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++err_out: ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++} ++ ++static void fdls_send_fabric_plogi(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ struct fc_std_flogi *pplogi; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_flogi); ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send PLOGI"); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ ++ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ fdls_init_plogi_frame(frame, iport); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI, ++ &iport->active_oxid_fabric_req); ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send fabric PLOGI", ++ iport->fcid); ++ mempool_free(frame, fnic->frame_pool); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ 
FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid, ++ oxid); ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++err_out: ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++} ++ ++static void fdls_send_rpn_id(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ struct fc_std_rpn_id *prpn_id; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_rpn_id); ++ uint8_t fcid[3]; ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send RPN_ID"); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ ++ prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *prpn_id = (struct fc_std_rpn_id) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, ++ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, ++ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, ++ .ct_fs_subtype = FC_NS_SUBTYPE, ++ .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)} ++ }; ++ ++ hton24(fcid, iport->fcid); ++ FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid); ++ ++ FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid); ++ FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN, ++ &iport->active_oxid_fabric_req); ++ ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send RPN_ID", ++ iport->fcid); ++ mempool_free(frame, fnic->frame_pool); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid, ++ oxid); ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++err_out: ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++} ++ ++static void fdls_send_scr(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ struct fc_std_scr *pscr; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_scr); ++ uint8_t fcid[3]; ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send SCR"); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ ++ pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *pscr = (struct fc_std_scr) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, ++ .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS, ++ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .scr = {.scr_cmd = ELS_SCR, ++ .scr_reg_func = ELS_SCRF_FULL} ++ }; ++ ++ hton24(fcid, iport->fcid); ++ FNIC_STD_SET_S_ID(pscr->fchdr, fcid); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR, ++ &iport->active_oxid_fabric_req); ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send SCR", ++ iport->fcid); ++ 
mempool_free(frame, fnic->frame_pool); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ FNIC_STD_SET_OX_ID(pscr->fchdr, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid, ++ oxid); ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++err_out: ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++} ++ ++static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state) ++{ ++ uint8_t *frame; ++ struct fc_std_gpn_ft *pgpn_ft; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_gpn_ft); ++ uint8_t fcid[3]; ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send GPN FT"); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ ++ pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *pgpn_ft = (struct fc_std_gpn_ft) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, ++ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, ++ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, ++ .ct_fs_subtype = FC_NS_SUBTYPE, ++ .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)}, ++ .gpn_ft.fn_fc4_type = 0x08 ++ }; ++ ++ hton24(fcid, iport->fcid); ++ FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT, ++ &iport->active_oxid_fabric_req); ++ ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send GPN FT", ++ iport->fcid); ++ mempool_free(frame, fnic->frame_pool); ++ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; ++ goto err_out; ++ } ++ FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid, ++ oxid); ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++err_out: ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++ fdls_set_state((&iport->fabric), fdls_state); ++} ++ ++static void fdls_send_register_fc4_types(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ struct fc_std_rft_id *prft_id; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_rft_id); ++ uint8_t fcid[3]; ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send RFT"); ++ return; ++ } ++ ++ prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *prft_id = (struct fc_std_rft_id) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, ++ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, ++ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, ++ .ct_fs_subtype = FC_NS_SUBTYPE, ++ .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)} ++ }; ++ ++ hton24(fcid, iport->fcid); ++ FNIC_STD_SET_S_ID(prft_id->fchdr, fcid); ++ FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT, ++ 
&iport->active_oxid_fabric_req); ++ ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send RFT", ++ iport->fcid); ++ mempool_free(frame, fnic->frame_pool); ++ return; ++ } ++ FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid, ++ oxid); ++ ++ if (IS_FNIC_FCP_INITIATOR(fnic)) ++ prft_id->rft_id.fr_fts.ff_type_map[0] = ++ cpu_to_be32(1 << FC_TYPE_FCP); ++ ++ prft_id->rft_id.fr_fts.ff_type_map[1] = ++ cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW)); ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++} ++ ++static void fdls_send_register_fc4_features(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ struct fc_std_rff_id *prff_id; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_rff_id); ++ uint8_t fcid[3]; ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send RFF"); ++ return; ++ } ++ ++ prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ *prff_id = (struct fc_std_rff_id) { ++ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, ++ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, ++ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, ++ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, ++ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, ++ .ct_fs_subtype = FC_NS_SUBTYPE, ++ .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)}, ++ .rff_id.fr_feat = 0x2, ++ .rff_id.fr_type = FC_TYPE_FCP ++ }; ++ ++ hton24(fcid, iport->fcid); ++ FNIC_STD_SET_S_ID(prff_id->fchdr, fcid); ++ FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF, ++ &iport->active_oxid_fabric_req); ++ ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send RFF", ++ iport->fcid); ++ mempool_free(frame, fnic->frame_pool); ++ return; ++ } ++ FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid, ++ oxid); ++ ++ if (IS_FNIC_FCP_INITIATOR(fnic)) { ++ prff_id->rff_id.fr_type = FC_TYPE_FCP; ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Unknown type", iport->fcid); ++ } ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++} ++ ++/** ++ * fdls_send_fabric_logo - Send flogo to the fcf ++ * @iport: Handle to fnic iport ++ * ++ * This function does not change or check the fabric state. ++ * It the caller's responsibility to set the appropriate iport fabric ++ * state when this is called. Normally it is FDLS_STATE_FABRIC_LOGO. ++ * Currently this assumes to be called with fnic lock held. 
++ */ ++void fdls_send_fabric_logo(struct fnic_iport_s *iport) ++{ ++ uint8_t *frame; ++ struct fc_std_logo *plogo; ++ struct fnic *fnic = iport->fnic; ++ uint8_t d_id[3]; ++ uint16_t oxid; ++ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + ++ sizeof(struct fc_std_logo); ++ ++ frame = fdls_alloc_frame(iport); ++ if (frame == NULL) { ++ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "Failed to allocate frame to send fabric LOGO"); ++ return; ++ } ++ ++ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); ++ fdls_init_logo_frame(frame, iport); ++ ++ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO, ++ &iport->active_oxid_fabric_req); ++ ++ if (oxid == FNIC_UNASSIGNED_OXID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Failed to allocate OXID to send fabric LOGO", ++ iport->fcid); ++ mempool_free(frame, fnic->frame_pool); ++ return; ++ } ++ FNIC_STD_SET_OX_ID(plogo->fchdr, oxid); ++ ++ hton24(d_id, FC_FID_FLOGI); ++ FNIC_STD_SET_D_ID(plogo->fchdr, d_id); ++ ++ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS send fabric LOGO with oxid: 0x%x", ++ iport->fcid, oxid); ++ ++ fnic_send_fcoe_frame(iport, frame, frame_size); ++ ++ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); ++} ++ ++void fdls_fabric_timer_callback(struct timer_list *t) ++{ ++ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer); ++ struct fnic_iport_s *iport = ++ container_of(fabric, struct fnic_iport_s, fabric); ++ struct fnic *fnic = iport->fnic; ++ unsigned long flags; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "tp: %d fab state: %d fab retry counter: %d max_flogi_retries: %d", ++ iport->fabric.timer_pending, iport->fabric.state, ++ iport->fabric.retry_counter, iport->max_flogi_retries); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ ++ if (!iport->fabric.timer_pending) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ return; ++ } ++ ++ if (iport->fabric.del_timer_inprogress) { ++ iport->fabric.del_timer_inprogress = 0; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fabric_del_timer inprogress(%d). 
Skip timer cb", ++ iport->fabric.del_timer_inprogress); ++ return; ++ } ++ ++ iport->fabric.timer_pending = 0; ++ ++ /* The fabric state indicates which frames have time out, and we retry */ ++ switch (iport->fabric.state) { ++ case FDLS_STATE_FABRIC_FLOGI: ++ /* Flogi received a LS_RJT with busy we retry from here */ ++ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) ++ && (iport->fabric.retry_counter < iport->max_flogi_retries)) { ++ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; ++ fdls_send_fabric_flogi(iport); ++ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { ++ /* Flogi has time out 2*ed_tov send abts */ ++ fdls_send_fabric_abts(iport); ++ } else { ++ /* ABTS has timed out ++ * Mark the OXID to be freed after 2 * r_a_tov and retry the req ++ */ ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ if (iport->fabric.retry_counter < iport->max_flogi_retries) { ++ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; ++ fdls_send_fabric_flogi(iport); ++ } else ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Exceeded max FLOGI retries"); ++ } ++ break; ++ case FDLS_STATE_FABRIC_PLOGI: ++ /* Plogi received a LS_RJT with busy we retry from here */ ++ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) ++ && (iport->fabric.retry_counter < iport->max_plogi_retries)) { ++ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; ++ fdls_send_fabric_plogi(iport); ++ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { ++ /* Plogi has timed out 2*ed_tov send abts */ ++ fdls_send_fabric_abts(iport); ++ } else { ++ /* ABTS has timed out ++ * Mark the OXID to be freed after 2 * r_a_tov and retry the req ++ */ ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ if (iport->fabric.retry_counter < iport->max_plogi_retries) { ++ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; ++ fdls_send_fabric_plogi(iport); ++ } else ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Exceeded max PLOGI retries"); ++ } ++ break; ++ case FDLS_STATE_RPN_ID: ++ /* Rpn_id received a LS_RJT with busy we retry from here */ ++ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) ++ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { ++ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; ++ fdls_send_rpn_id(iport); ++ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) ++ /* RPN has timed out. Send abts */ ++ fdls_send_fabric_abts(iport); ++ else { ++ /* ABTS has timed out */ ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ ++ } ++ break; ++ case FDLS_STATE_SCR: ++ /* scr received a LS_RJT with busy we retry from here */ ++ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) ++ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { ++ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; ++ fdls_send_scr(iport); ++ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) ++ /* scr has timed out. Send abts */ ++ fdls_send_fabric_abts(iport); ++ else { ++ /* ABTS has timed out */ ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "ABTS timed out. 
Starting PLOGI: %p", iport); ++ fnic_fdls_start_plogi(iport); ++ } ++ break; ++ case FDLS_STATE_REGISTER_FC4_TYPES: ++ /* scr received a LS_RJT with busy we retry from here */ ++ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) ++ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { ++ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; ++ fdls_send_register_fc4_types(iport); ++ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { ++ /* RFT_ID timed out send abts */ ++ fdls_send_fabric_abts(iport); ++ } else { ++ /* ABTS has timed out */ ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "ABTS timed out. Starting PLOGI: %p", iport); ++ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ ++ } ++ break; ++ case FDLS_STATE_REGISTER_FC4_FEATURES: ++ /* scr received a LS_RJT with busy we retry from here */ ++ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) ++ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { ++ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; ++ fdls_send_register_fc4_features(iport); ++ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) ++ /* SCR has timed out. Send abts */ ++ fdls_send_fabric_abts(iport); ++ else { ++ /* ABTS has timed out */ ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "ABTS timed out. Starting PLOGI %p", iport); ++ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ ++ } ++ break; ++ case FDLS_STATE_RSCN_GPN_FT: ++ case FDLS_STATE_SEND_GPNFT: ++ case FDLS_STATE_GPN_FT: ++ /* GPN_FT received a LS_RJT with busy we retry from here */ ++ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) ++ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { ++ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; ++ fdls_send_gpn_ft(iport, iport->fabric.state); ++ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { ++ /* gpn_ft has timed out. Send abts */ ++ fdls_send_fabric_abts(iport); ++ } else { ++ /* ABTS has timed out */ ++ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); ++ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) { ++ fdls_send_gpn_ft(iport, iport->fabric.state); ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "ABTS timeout for fabric GPN_FT. 
Check name server: %p", ++ iport); ++ } ++ } ++ break; ++ default: ++ fnic_fdls_start_flogi(iport); /* Placeholder call */ ++ break; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++} ++ ++ ++static void fnic_fdls_start_flogi(struct fnic_iport_s *iport) ++{ ++ iport->fabric.retry_counter = 0; ++ fdls_send_fabric_flogi(iport); ++ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_FLOGI); ++ iport->fabric.flags = 0; ++} ++ ++static void fnic_fdls_start_plogi(struct fnic_iport_s *iport) ++{ ++ iport->fabric.retry_counter = 0; ++ fdls_send_fabric_plogi(iport); ++ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_PLOGI); ++ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; ++} ++ ++ ++static void ++fdls_process_rff_id_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ struct fnic *fnic = iport->fnic; ++ struct fnic_fdls_fabric_s *fdls = &iport->fabric; ++ struct fc_std_rff_id *rff_rsp = (struct fc_std_rff_id *) fchdr; ++ uint16_t rsp; ++ uint8_t reason_code; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_FEATURES) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RFF_ID resp recvd in state(%d). Dropping.", ++ fdls_get_state(fdls)); ++ return; ++ } ++ ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); ++ return; ++ } ++ ++ rsp = FNIC_STD_GET_FC_CT_CMD((&rff_rsp->fc_std_ct_hdr)); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS process RFF ID response: 0x%04x", iport->fcid, ++ (uint32_t) rsp); ++ ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ switch (rsp) { ++ case FC_FS_ACC: ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ fdls->retry_counter = 0; ++ fdls_set_state((&iport->fabric), FDLS_STATE_SCR); ++ fdls_send_scr(iport); ++ break; ++ case FC_FS_RJT: ++ reason_code = rff_rsp->fc_std_ct_hdr.ct_reason; ++ if (((reason_code == FC_FS_RJT_BSY) ++ || (reason_code == FC_FS_RJT_UNABL)) ++ && (fdls->retry_counter < FDLS_RETRY_COUNT)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RFF_ID ret ELS_LS_RJT BUSY. Retry from timer routine %p", ++ iport); ++ ++ /* Retry again from the timer routine */ ++ fdls->flags |= FNIC_FDLS_RETRY_FRAME; ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RFF_ID returned ELS_LS_RJT. 
Halting discovery %p", ++ iport); ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ fdls->timer_pending = 0; ++ fdls->retry_counter = 0; ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++static void ++fdls_process_rft_id_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ struct fnic_fdls_fabric_s *fdls = &iport->fabric; ++ struct fc_std_rft_id *rft_rsp = (struct fc_std_rft_id *) fchdr; ++ uint16_t rsp; ++ uint8_t reason_code; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_TYPES) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RFT_ID resp recvd in state(%d). Dropping.", ++ fdls_get_state(fdls)); ++ return; ++ } ++ ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); ++ return; ++ } ++ ++ ++ rsp = FNIC_STD_GET_FC_CT_CMD((&rft_rsp->fc_std_ct_hdr)); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS process RFT ID response: 0x%04x", iport->fcid, ++ (uint32_t) rsp); ++ ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ switch (rsp) { ++ case FC_FS_ACC: ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ fdls->retry_counter = 0; ++ fdls_send_register_fc4_features(iport); ++ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_FEATURES); ++ break; ++ case FC_FS_RJT: ++ reason_code = rft_rsp->fc_std_ct_hdr.ct_reason; ++ if (((reason_code == FC_FS_RJT_BSY) ++ || (reason_code == FC_FS_RJT_UNABL)) ++ && (fdls->retry_counter < FDLS_RETRY_COUNT)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: RFT_ID ret ELS_LS_RJT BUSY. Retry from timer routine", ++ iport->fcid); ++ ++ /* Retry again from the timer routine */ ++ fdls->flags |= FNIC_FDLS_RETRY_FRAME; ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: RFT_ID REJ. Halting discovery reason %d expl %d", ++ iport->fcid, reason_code, ++ rft_rsp->fc_std_ct_hdr.ct_explan); ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ fdls->timer_pending = 0; ++ fdls->retry_counter = 0; ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++static void ++fdls_process_rpn_id_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ struct fnic_fdls_fabric_s *fdls = &iport->fabric; ++ struct fc_std_rpn_id *rpn_rsp = (struct fc_std_rpn_id *) fchdr; ++ uint16_t rsp; ++ uint8_t reason_code; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ if (fdls_get_state(fdls) != FDLS_STATE_RPN_ID) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RPN_ID resp recvd in state(%d). Dropping.", ++ fdls_get_state(fdls)); ++ return; ++ } ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); ++ return; ++ } ++ ++ rsp = FNIC_STD_GET_FC_CT_CMD((&rpn_rsp->fc_std_ct_hdr)); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS process RPN ID response: 0x%04x", iport->fcid, ++ (uint32_t) rsp); ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ switch (rsp) { ++ case FC_FS_ACC: ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ fdls->retry_counter = 0; ++ fdls_send_register_fc4_types(iport); ++ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_TYPES); ++ break; ++ case FC_FS_RJT: ++ reason_code = rpn_rsp->fc_std_ct_hdr.ct_reason; ++ if (((reason_code == FC_FS_RJT_BSY) ++ || (reason_code == FC_FS_RJT_UNABL)) ++ && (fdls->retry_counter < FDLS_RETRY_COUNT)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RPN_ID returned REJ BUSY. Retry from timer routine %p", ++ iport); ++ ++ /* Retry again from the timer routine */ ++ fdls->flags |= FNIC_FDLS_RETRY_FRAME; ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RPN_ID ELS_LS_RJT. Halting discovery %p", iport); ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ fdls->timer_pending = 0; ++ fdls->retry_counter = 0; ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++static void ++fdls_process_scr_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ struct fnic_fdls_fabric_s *fdls = &iport->fabric; ++ struct fc_std_scr *scr_rsp = (struct fc_std_scr *) fchdr; ++ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FDLS process SCR response: 0x%04x", ++ (uint32_t) scr_rsp->scr.scr_cmd); ++ ++ if (fdls_get_state(fdls) != FDLS_STATE_SCR) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "SCR resp recvd in state(%d). Dropping.", ++ fdls_get_state(fdls)); ++ return; ++ } ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); ++ } ++ ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ switch (scr_rsp->scr.scr_cmd) { ++ case ELS_LS_ACC: ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ iport->fabric.retry_counter = 0; ++ fdls_send_gpn_ft(iport, FDLS_STATE_GPN_FT); ++ break; ++ ++ case ELS_LS_RJT: ++ ++ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) ++ || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) ++ && (fdls->retry_counter < FDLS_RETRY_COUNT)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "SCR ELS_LS_RJT BUSY. 
Retry from timer routine %p", ++ iport); ++ /* Retry again from the timer routine */ ++ fdls->flags |= FNIC_FDLS_RETRY_FRAME; ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "SCR returned ELS_LS_RJT. Halting discovery %p", ++ iport); ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ fdls->timer_pending = 0; ++ fdls->retry_counter = 0; ++ } ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++ ++ ++static void ++fdls_process_gpn_ft_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr, int len) ++{ ++ struct fnic_fdls_fabric_s *fdls = &iport->fabric; ++ struct fc_std_gpn_ft *gpn_ft_rsp = (struct fc_std_gpn_ft *) fchdr; ++ uint16_t rsp; ++ uint8_t reason_code; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FDLS process GPN_FT response: iport state: %d len: %d", ++ iport->state, len); ++ ++ /* ++ * GPNFT response :- ++ * FDLS_STATE_GPN_FT : GPNFT send after SCR state ++ * during fabric discovery(FNIC_IPORT_STATE_FABRIC_DISC) ++ * FDLS_STATE_RSCN_GPN_FT : GPNFT send in response to RSCN ++ * FDLS_STATE_SEND_GPNFT : GPNFT send after deleting a Target, ++ * e.g. after receiving Target LOGO ++ * FDLS_STATE_TGT_DISCOVERY :Target discovery is currently in progress ++ * from previous GPNFT response,a new GPNFT response has come. ++ */ ++ if (!(((iport->state == FNIC_IPORT_STATE_FABRIC_DISC) ++ && (fdls_get_state(fdls) == FDLS_STATE_GPN_FT)) ++ || ((iport->state == FNIC_IPORT_STATE_READY) ++ && ((fdls_get_state(fdls) == FDLS_STATE_RSCN_GPN_FT) ++ || (fdls_get_state(fdls) == FDLS_STATE_SEND_GPNFT) ++ || (fdls_get_state(fdls) == FDLS_STATE_TGT_DISCOVERY))))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "GPNFT resp recvd in fab state(%d) iport_state(%d). Dropping.", ++ fdls_get_state(fdls), iport->state); ++ return; ++ } ++ ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); ++ } ++ ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ iport->state = FNIC_IPORT_STATE_READY; ++ rsp = FNIC_STD_GET_FC_CT_CMD((&gpn_ft_rsp->fc_std_ct_hdr)); ++ ++ switch (rsp) { ++ ++ case FC_FS_ACC: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: GPNFT_RSP accept", iport->fcid); ++ break; ++ ++ case FC_FS_RJT: ++ reason_code = gpn_ft_rsp->fc_std_ct_hdr.ct_reason; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: GPNFT_RSP Reject reason: %d", iport->fcid, reason_code); ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++/** ++ * fdls_process_fabric_logo_rsp - Handle an flogo response from the fcf ++ * @iport: Handle to fnic iport ++ * @fchdr: Incoming frame ++ */ ++static void ++fdls_process_fabric_logo_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ struct fc_std_flogi *flogo_rsp = (struct fc_std_flogi *) fchdr; ++ struct fnic_fdls_fabric_s *fdls = &iport->fabric; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); ++ } ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ switch (flogo_rsp->els.fl_cmd) { ++ case ELS_LS_ACC: ++ if (iport->fabric.state != FDLS_STATE_FABRIC_LOGO) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Flogo response. Fabric not in LOGO state. Dropping! %p", ++ iport); ++ return; ++ } ++ ++ iport->fabric.state = FDLS_STATE_FLOGO_DONE; ++ iport->state = FNIC_IPORT_STATE_LINK_WAIT; ++ ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport 0x%p Canceling fabric disc timer\n", ++ iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Flogo response from Fabric for did: 0x%x", ++ ntoh24(fchdr->fh_d_id)); ++ return; ++ ++ case ELS_LS_RJT: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Flogo response from Fabric for did: 0x%x returned ELS_LS_RJT", ++ ntoh24(fchdr->fh_d_id)); ++ return; ++ ++ default: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGO response not accepted or rejected: 0x%x", ++ flogo_rsp->els.fl_cmd); ++ } ++} ++ ++static void ++fdls_process_flogi_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr, void *rx_frame) ++{ ++ struct fnic_fdls_fabric_s *fabric = &iport->fabric; ++ struct fc_std_flogi *flogi_rsp = (struct fc_std_flogi *) fchdr; ++ uint8_t *fcid; ++ uint16_t rdf_size; ++ uint8_t fcmac[6] = { 0x0E, 0XFC, 0x00, 0x00, 0x00, 0x00 }; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FDLS processing FLOGI response", iport->fcid); ++ ++ if (fdls_get_state(fabric) != FDLS_STATE_FABRIC_FLOGI) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI response received in state (%d). Dropping frame", ++ fdls_get_state(fabric)); ++ return; ++ } ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fabric), oxid, iport->active_oxid_fabric_req); ++ return; ++ } ++ ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ switch (flogi_rsp->els.fl_cmd) { ++ case ELS_LS_ACC: ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport fcid: 0x%x Canceling fabric disc timer\n", ++ iport->fcid); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ ++ iport->fabric.timer_pending = 0; ++ iport->fabric.retry_counter = 0; ++ fcid = FNIC_STD_GET_D_ID(fchdr); ++ iport->fcid = ntoh24(fcid); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: FLOGI response accepted", iport->fcid); ++ ++ /* Learn the Service Params */ ++ rdf_size = be16_to_cpu(FNIC_LOGI_RDF_SIZE(flogi_rsp->els)); ++ if ((rdf_size >= FNIC_MIN_DATA_FIELD_SIZE) ++ && (rdf_size < FNIC_FC_MAX_PAYLOAD_LEN)) ++ iport->max_payload_size = min(rdf_size, ++ iport->max_payload_size); ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "max_payload_size from fabric: %u set: %d", rdf_size, ++ iport->max_payload_size); ++ ++ iport->r_a_tov = be32_to_cpu(FNIC_LOGI_R_A_TOV(flogi_rsp->els)); ++ iport->e_d_tov = be32_to_cpu(FNIC_LOGI_E_D_TOV(flogi_rsp->els)); ++ ++ if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC) ++ iport->e_d_tov = iport->e_d_tov / FNIC_NSEC_TO_MSEC; ++ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "From fabric: R_A_TOV: %d E_D_TOV: %d", ++ iport->r_a_tov, iport->e_d_tov); ++ ++ if (IS_FNIC_FCP_INITIATOR(fnic)) { ++ fc_host_fabric_name(iport->fnic->lport->host) = ++ get_unaligned_be64(&FNIC_LOGI_NODE_NAME(flogi_rsp->els)); ++ fc_host_port_id(iport->fnic->lport->host) = iport->fcid; ++ } ++ ++ fnic_fdls_learn_fcoe_macs(iport, rx_frame, fcid); ++ ++ memcpy(&fcmac[3], fcid, 3); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Adding vNIC device MAC addr: %02x:%02x:%02x:%02x:%02x:%02x", ++ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], ++ fcmac[5]); ++ vnic_dev_add_addr(iport->fnic->vdev, fcmac); ++ ++ if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI) { ++ fnic_fdls_start_plogi(iport); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI response received. Starting PLOGI"); ++ } else { ++ /* From FDLS_STATE_FABRIC_FLOGI state fabric can only go to ++ * FDLS_STATE_LINKDOWN ++ * state, hence we don't have to worry about undoing: ++ * the fnic_fdls_register_portid and vnic_dev_add_addr ++ */ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI response received in state (%d). Dropping frame", ++ fdls_get_state(fabric)); ++ } ++ break; ++ ++ case ELS_LS_RJT: ++ if (fabric->retry_counter < iport->max_flogi_retries) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI returned ELS_LS_RJT BUSY. Retry from timer routine %p", ++ iport); ++ ++ /* Retry Flogi again from the timer routine. */ ++ fabric->flags |= FNIC_FDLS_RETRY_FRAME; ++ ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI returned ELS_LS_RJT. 
Halting discovery %p", ++ iport); ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport 0x%p Canceling fabric disc timer\n", ++ iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ fabric->timer_pending = 0; ++ fabric->retry_counter = 0; ++ } ++ break; ++ ++ default: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FLOGI response not accepted: 0x%x", ++ flogi_rsp->els.fl_cmd); ++ break; ++ } ++} ++ ++static void ++fdls_process_fabric_plogi_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *) fchdr; ++ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; ++ struct fnic_fdls_fabric_s *fdls = &iport->fabric; ++ struct fnic *fnic = iport->fnic; ++ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); ++ ++ if (fdls_get_state((&iport->fabric)) != FDLS_STATE_FABRIC_PLOGI) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Fabric PLOGI response received in state (%d). Dropping frame", ++ fdls_get_state(&iport->fabric)); ++ return; ++ } ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", ++ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); ++ return; ++ } ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ switch (plogi_rsp->els.fl_cmd) { ++ case ELS_LS_ACC: ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport fcid: 0x%x fabric PLOGI response: Accepted\n", ++ iport->fcid); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ iport->fabric.retry_counter = 0; ++ fdls_set_state(&iport->fabric, FDLS_STATE_RPN_ID); ++ fdls_send_rpn_id(iport); ++ break; ++ case ELS_LS_RJT: ++ ++ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) ++ || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) ++ && (iport->fabric.retry_counter < iport->max_plogi_retries)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Fabric PLOGI ELS_LS_RJT BUSY. Retry from timer routine", ++ iport->fcid); ++ } else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "0x%x: Fabric PLOGI ELS_LS_RJT. Halting discovery", ++ iport->fcid); ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport fcid: 0x%x Canceling fabric disc timer\n", ++ iport->fcid); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ iport->fabric.retry_counter = 0; ++ return; ++ } ++ break; ++ default: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "PLOGI response not accepted: 0x%x", ++ plogi_rsp->els.fl_cmd); ++ break; ++ } ++} ++ ++static void ++fdls_process_fabric_abts_rsp(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ uint32_t s_id; ++ struct fc_std_abts_ba_acc *ba_acc = (struct fc_std_abts_ba_acc *)fchdr; ++ struct fc_std_abts_ba_rjt *ba_rjt; ++ uint32_t fabric_state = iport->fabric.state; ++ struct fnic *fnic = iport->fnic; ++ int frame_type; ++ uint16_t oxid; ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ ba_rjt = (struct fc_std_abts_ba_rjt *) fchdr; ++ ++ if (!((s_id == FC_FID_DIR_SERV) || (s_id == FC_FID_FLOGI) ++ || (s_id == FC_FID_FCTRL))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received abts rsp with invalid SID: 0x%x. 
Dropping frame", ++ s_id); ++ return; ++ } ++ ++ oxid = FNIC_STD_GET_OX_ID(fchdr); ++ if (iport->active_oxid_fabric_req != oxid) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received abts rsp with invalid oxid: 0x%x. Dropping frame", ++ oxid); ++ return; ++ } ++ ++ if (iport->fabric.timer_pending) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Canceling fabric disc timer %p\n", iport); ++ fnic_del_fabric_timer_sync(fnic); ++ } ++ iport->fabric.timer_pending = 0; ++ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; ++ ++ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received abts rsp BA_ACC for fabric_state: %d OX_ID: 0x%x", ++ fabric_state, be16_to_cpu(ba_acc->acc.ba_ox_id)); ++ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "BA_RJT fs: %d OX_ID: 0x%x rc: 0x%x rce: 0x%x", ++ fabric_state, FNIC_STD_GET_OX_ID(&ba_rjt->fchdr), ++ ba_rjt->rjt.br_reason, ba_rjt->rjt.br_explan); ++ } ++ ++ frame_type = FNIC_FRAME_TYPE(oxid); ++ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); ++ ++ /* currently error handling/retry logic is same for ABTS BA_ACC & BA_RJT */ ++ switch (frame_type) { ++ case FNIC_FRAME_TYPE_FABRIC_FLOGI: ++ if (iport->fabric.retry_counter < iport->max_flogi_retries) ++ fdls_send_fabric_flogi(iport); ++ else ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Exceeded max FLOGI retries"); ++ break; ++ case FNIC_FRAME_TYPE_FABRIC_LOGO: ++ if (iport->fabric.retry_counter < FABRIC_LOGO_MAX_RETRY) ++ fdls_send_fabric_logo(iport); ++ break; ++ case FNIC_FRAME_TYPE_FABRIC_PLOGI: ++ if (iport->fabric.retry_counter < iport->max_plogi_retries) ++ fdls_send_fabric_plogi(iport); ++ else ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Exceeded max PLOGI retries"); ++ break; ++ case FNIC_FRAME_TYPE_FABRIC_RPN: ++ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) ++ fdls_send_rpn_id(iport); ++ else ++ /* go back to fabric Plogi */ ++ fnic_fdls_start_plogi(iport); ++ break; ++ case FNIC_FRAME_TYPE_FABRIC_SCR: ++ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) ++ fdls_send_scr(iport); ++ else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "SCR exhausted retries. Start fabric PLOGI %p", ++ iport); ++ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ ++ } ++ break; ++ case FNIC_FRAME_TYPE_FABRIC_RFT: ++ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) ++ fdls_send_register_fc4_types(iport); ++ else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RFT exhausted retries. Start fabric PLOGI %p", ++ iport); ++ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ ++ } ++ break; ++ case FNIC_FRAME_TYPE_FABRIC_RFF: ++ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) ++ fdls_send_register_fc4_features(iport); ++ else { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "RFF exhausted retries. Start fabric PLOGI %p", ++ iport); ++ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ ++ } ++ break; ++ case FNIC_FRAME_TYPE_FABRIC_GPN_FT: ++ if (iport->fabric.retry_counter <= FDLS_RETRY_COUNT) ++ fdls_send_gpn_ft(iport, fabric_state); ++ else ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "GPN FT exhausted retries. 
Start fabric PLOGI %p", ++ iport); ++ break; ++ default: ++ /* ++ * We should not be here since we already validated rx oxid with ++ * our active_oxid_fabric_req ++ */ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Invalid OXID/active oxid 0x%x\n", oxid); ++ WARN_ON(true); ++ return; ++ } ++} ++ ++/* ++ * Performs a validation for all FCOE frames and return the frame type ++ */ ++int ++fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport, ++ struct fc_frame_header *fchdr) ++{ ++ uint8_t type; ++ uint8_t *fc_payload; ++ uint16_t oxid; ++ uint32_t s_id; ++ uint32_t d_id; ++ struct fnic *fnic = iport->fnic; ++ struct fnic_fdls_fabric_s *fabric = &iport->fabric; ++ int oxid_frame_type; ++ ++ oxid = FNIC_STD_GET_OX_ID(fchdr); ++ fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header); ++ type = *fc_payload; ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = ntoh24(fchdr->fh_d_id); ++ ++ /* some common validation */ ++ if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) { ++ if ((iport->fcid != d_id) || (!FNIC_FC_FRAME_CS_CTL(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "invalid frame received. Dropping frame"); ++ return -1; ++ } ++ } ++ ++ /* BLS ABTS response */ ++ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC) ++ || (fchdr->fh_r_ctl == FC_RCTL_BA_RJT)) { ++ if (!(FNIC_FC_FRAME_TYPE_BLS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received ABTS invalid frame. Dropping frame"); ++ return -1; ++ ++ } ++ if (fdls_is_oxid_fabric_req(oxid)) { ++ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame", ++ oxid, s_id); ++ return -1; ++ } ++ return FNIC_FABRIC_BLS_ABTS_RSP; ++ } else if (fdls_is_oxid_fdmi_req(oxid)) { ++ return FNIC_FDMI_BLS_ABTS_RSP; ++ } else if (fdls_is_oxid_tgt_req(oxid)) { ++ return FNIC_TPORT_BLS_ABTS_RSP; ++ } ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received ABTS rsp with unknown oxid(0x%x) from 0x%x. Dropping frame", ++ oxid, s_id); ++ return -1; ++ } ++ ++ /* BLS ABTS Req */ ++ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ABTS) ++ && (FNIC_FC_FRAME_TYPE_BLS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Receiving Abort Request from s_id: 0x%x", s_id); ++ return FNIC_BLS_ABTS_REQ; ++ } ++ ++ /* unsolicited requests frames */ ++ if (FNIC_FC_FRAME_UNSOLICITED(fchdr)) { ++ switch (type) { ++ case ELS_LOGO: ++ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) ++ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr)) ++ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received LOGO invalid frame. Dropping frame"); ++ return -1; ++ } ++ return FNIC_ELS_LOGO_REQ; ++ case ELS_RSCN: ++ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) ++ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) ++ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received RSCN invalid FCTL. 
Dropping frame"); ++ return -1; ++ } ++ if (s_id != FC_FID_FCTRL) ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received RSCN from target FCTL: 0x%x type: 0x%x s_id: 0x%x.", ++ fchdr->fh_f_ctl[0], fchdr->fh_type, s_id); ++ return FNIC_ELS_RSCN_REQ; ++ case ELS_PLOGI: ++ return FNIC_ELS_PLOGI_REQ; ++ case ELS_ECHO: ++ return FNIC_ELS_ECHO_REQ; ++ case ELS_ADISC: ++ return FNIC_ELS_ADISC; ++ case ELS_RLS: ++ return FNIC_ELS_RLS; ++ case ELS_RRQ: ++ return FNIC_ELS_RRQ; ++ default: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Unsupported frame (type:0x%02x) from fcid: 0x%x", ++ type, s_id); ++ return FNIC_ELS_UNSUPPORTED_REQ; ++ } ++ } ++ ++ /* solicited response from fabric or target */ ++ oxid_frame_type = FNIC_FRAME_TYPE(oxid); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "oxid frame code: 0x%x, oxid: 0x%x\n", oxid_frame_type, oxid); ++ switch (oxid_frame_type) { ++ case FNIC_FRAME_TYPE_FABRIC_FLOGI: ++ if (type == ELS_LS_ACC) { ++ if ((s_id != FC_FID_FLOGI) ++ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown frame. Dropping frame"); ++ return -1; ++ } ++ } ++ return FNIC_FABRIC_FLOGI_RSP; ++ ++ case FNIC_FRAME_TYPE_FABRIC_PLOGI: ++ if (type == ELS_LS_ACC) { ++ if ((s_id != FC_FID_DIR_SERV) ++ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown frame. Dropping frame"); ++ return -1; ++ } ++ } ++ return FNIC_FABRIC_PLOGI_RSP; ++ ++ case FNIC_FRAME_TYPE_FABRIC_SCR: ++ if (type == ELS_LS_ACC) { ++ if ((s_id != FC_FID_FCTRL) ++ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown frame. Dropping frame"); ++ return -1; ++ } ++ } ++ return FNIC_FABRIC_SCR_RSP; ++ ++ case FNIC_FRAME_TYPE_FABRIC_RPN: ++ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown frame. Dropping frame"); ++ return -1; ++ } ++ return FNIC_FABRIC_RPN_RSP; ++ ++ case FNIC_FRAME_TYPE_FABRIC_RFT: ++ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown frame. Dropping frame"); ++ return -1; ++ } ++ return FNIC_FABRIC_RFT_RSP; ++ ++ case FNIC_FRAME_TYPE_FABRIC_RFF: ++ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown frame. Dropping frame"); ++ return -1; ++ } ++ return FNIC_FABRIC_RFF_RSP; ++ ++ case FNIC_FRAME_TYPE_FABRIC_GPN_FT: ++ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown frame. 
Dropping frame"); ++ return -1; ++ } ++ return FNIC_FABRIC_GPN_FT_RSP; ++ ++ case FNIC_FRAME_TYPE_FABRIC_LOGO: ++ return FNIC_FABRIC_LOGO_RSP; ++ default: ++ /* Drop the Rx frame and log/stats it */ ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Solicited response: unknown OXID: 0x%x", oxid); ++ return -1; ++ } ++ ++ return -1; ++} ++ ++void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame, ++ int len, int fchdr_offset) ++{ ++ struct fc_frame_header *fchdr; ++ uint32_t s_id = 0; ++ uint32_t d_id = 0; ++ struct fnic *fnic = iport->fnic; ++ int frame_type; ++ ++ fchdr = (struct fc_frame_header *) ((uint8_t *) rx_frame + fchdr_offset); ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = ntoh24(fchdr->fh_d_id); ++ ++ fnic_debug_dump_fc_frame(fnic, fchdr, len, "Incoming"); ++ ++ frame_type = ++ fnic_fdls_validate_and_get_frame_type(iport, fchdr); ++ ++ /*if we are in flogo drop everything else */ ++ if (iport->fabric.state == FDLS_STATE_FABRIC_LOGO && ++ frame_type != FNIC_FABRIC_LOGO_RSP) ++ return; ++ ++ switch (frame_type) { ++ case FNIC_FABRIC_FLOGI_RSP: ++ fdls_process_flogi_rsp(iport, fchdr, rx_frame); ++ break; ++ case FNIC_FABRIC_PLOGI_RSP: ++ fdls_process_fabric_plogi_rsp(iport, fchdr); ++ break; ++ case FNIC_FABRIC_RPN_RSP: ++ fdls_process_rpn_id_rsp(iport, fchdr); ++ break; ++ case FNIC_FABRIC_RFT_RSP: ++ fdls_process_rft_id_rsp(iport, fchdr); ++ break; ++ case FNIC_FABRIC_RFF_RSP: ++ fdls_process_rff_id_rsp(iport, fchdr); ++ break; ++ case FNIC_FABRIC_SCR_RSP: ++ fdls_process_scr_rsp(iport, fchdr); ++ break; ++ case FNIC_FABRIC_GPN_FT_RSP: ++ fdls_process_gpn_ft_rsp(iport, fchdr, len); ++ break; ++ case FNIC_FABRIC_LOGO_RSP: ++ fdls_process_fabric_logo_rsp(iport, fchdr); ++ break; ++ case FNIC_FABRIC_BLS_ABTS_RSP: ++ fdls_process_fabric_abts_rsp(iport, fchdr); ++ break; ++ default: ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "s_id: 0x%x d_did: 0x%x", s_id, d_id); ++ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Received unknown FCoE frame of len: %d. Dropping frame", len); ++ break; ++ } ++} +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index 73fb8245c7b7..5854b76d68e4 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -24,6 +24,10 @@ + #include + #include + #include ++#include ++#include ++#include ++#include + #include "fnic_io.h" + #include "fnic_res.h" + #include "fnic_trace.h" +@@ -36,6 +40,7 @@ + #include "vnic_intr.h" + #include "vnic_stats.h" + #include "vnic_scsi.h" ++#include "fnic_fdls.h" + + #define DRV_NAME "fnic" + #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" +@@ -43,6 +48,7 @@ + #define PFX DRV_NAME ": " + #define DFX DRV_NAME "%d: " + ++#define FABRIC_LOGO_MAX_RETRY 3 + #define DESC_CLEAN_LOW_WATERMARK 8 + #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ + #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ +@@ -51,6 +57,7 @@ + #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ + #define FNIC_DFLT_QUEUE_DEPTH 256 + #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ ++#define LUN0_DELAY_TIME 9 + + /* + * Tag bits used for special requests. +@@ -88,6 +95,8 @@ + #define FNIC_DEV_RST_TERM_DONE BIT(20) + #define FNIC_DEV_RST_ABTS_PENDING BIT(21) + ++#define IS_FNIC_FCP_INITIATOR(fnic) (fnic->role == FNIC_ROLE_FCP_INITIATOR) ++ + /* + * Usage of the scsi_cmnd scratchpad. + * These fields are locked by the hashed io_req_lock. 
+@@ -207,12 +216,26 @@ enum fnic_state { + + struct mempool; + ++enum fnic_role_e { ++ FNIC_ROLE_FCP_INITIATOR = 0, ++}; ++ + enum fnic_evt { + FNIC_EVT_START_VLAN_DISC = 1, + FNIC_EVT_START_FCF_DISC = 2, + FNIC_EVT_MAX, + }; + ++struct fnic_frame_list { ++ /* ++ * Link to frame lists ++ */ ++ struct list_head links; ++ void *fp; ++ int frame_len; ++ int rx_ethhdr_stripped; ++}; ++ + struct fnic_event { + struct list_head list; + struct fnic *fnic; +@@ -229,6 +252,8 @@ struct fnic_cpy_wq { + /* Per-instance private data structure */ + struct fnic { + int fnic_num; ++ enum fnic_role_e role; ++ struct fnic_iport_s iport; + struct fc_lport *lport; + struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ + struct vnic_dev_bar bar0; +@@ -269,6 +294,7 @@ struct fnic { + unsigned long state_flags; /* protected by host lock */ + enum fnic_state state; + spinlock_t fnic_lock; ++ unsigned long lock_flags; + + u16 vlan_id; /* VLAN tag including priority */ + u8 data_src_addr[ETH_ALEN]; +@@ -299,7 +325,9 @@ struct fnic { + struct work_struct frame_work; + struct work_struct flush_work; + struct sk_buff_head frame_queue; +- struct sk_buff_head tx_queue; ++ struct list_head tx_queue; ++ mempool_t *frame_pool; ++ mempool_t *frame_elem_pool; + + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); +@@ -353,6 +381,9 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); + void fnic_handle_frame(struct work_struct *work); + void fnic_handle_link(struct work_struct *work); + void fnic_handle_event(struct work_struct *work); ++void fdls_reclaim_oxid_handler(struct work_struct *work); ++void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid); ++void fdls_schedule_oxid_free_retry_work(struct work_struct *work); + int fnic_rq_cmpl_handler(struct fnic *fnic, int); + int fnic_alloc_rq_frame(struct vnic_rq *rq); + void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); +@@ -389,7 +420,6 @@ void fnic_handle_fip_frame(struct work_struct *work); + void fnic_handle_fip_event(struct fnic *fnic); + void fnic_fcoe_reset_vlans(struct fnic *fnic); + void fnic_fcoe_evlist_free(struct fnic *fnic); +-extern void fnic_handle_fip_timer(struct fnic *fnic); + + static inline int + fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) +@@ -398,4 +428,74 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) + } + void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); + void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); ++void fnic_free_txq(struct list_head *head); ++ ++struct fnic_scsi_iter_data { ++ struct fnic *fnic; ++ void *data1; ++ void *data2; ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2); ++}; ++ ++static inline bool ++fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) ++{ ++ struct fnic_scsi_iter_data *iter = iter_data; ++ ++ return iter->fn(iter->fnic, sc, iter->data1, iter->data2); ++} ++ ++static inline void ++fnic_scsi_io_iter(struct fnic *fnic, ++ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, ++ void *data1, void *data2), ++ void *data1, void *data2) ++{ ++ struct fnic_scsi_iter_data iter_data = { ++ .fn = fn, ++ .fnic = fnic, ++ .data1 = data1, ++ .data2 = data2, ++ }; ++ scsi_host_busy_iter(fnic->lport->host, fnic_io_iter_handler, &iter_data); ++} ++ ++#ifdef FNIC_DEBUG ++static inline void ++fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) ++{ ++ int i; ++ ++ for (i = 0; i < len; i 
= i+8) { ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, ++ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], ++ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); ++ } ++} ++ ++static inline void ++fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ int len, char *pfx) ++{ ++ uint32_t s_id, d_id; ++ ++ s_id = ntoh24(fchdr->fh_s_id); ++ d_id = ntoh24(fchdr->fh_d_id); ++ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", ++ pfx, s_id, d_id, fchdr->fh_type, ++ FNIC_STD_GET_OX_ID(fchdr), len); ++ ++ fnic_debug_dump(fnic, (uint8_t *)fchdr, len); ++ ++} ++#else /* FNIC_DEBUG */ ++static inline void ++fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {} ++static inline void ++fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, ++ uint32_t len, char *pfx) {} ++#endif /* FNIC_DEBUG */ + #endif /* _FNIC_H_ */ +* Unmerged path drivers/scsi/fnic/fnic_fcs.c +diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h +index 5d78eea20873..14630b051487 100644 +--- a/drivers/scsi/fnic/fnic_fdls.h ++++ b/drivers/scsi/fnic/fnic_fdls.h +@@ -402,7 +402,6 @@ int fnic_send_fip_frame(struct fnic_iport_s *iport, + void *frame, int frame_size); + void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, + uint8_t *fcid); +- + void fnic_fdls_add_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, unsigned long flags); + void fnic_fdls_remove_tport(struct fnic_iport_s *iport, +diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h +index 1cb6a68c8e4e..8b3113876796 100644 +--- a/drivers/scsi/fnic/fnic_io.h ++++ b/drivers/scsi/fnic/fnic_io.h +@@ -65,15 +65,4 @@ struct fnic_io_req { + struct completion *abts_done; /* completion for abts */ + struct completion *dr_done; /* completion for device reset */ + }; +- +-enum fnic_port_speeds { +- DCEM_PORTSPEED_NONE = 0, +- DCEM_PORTSPEED_1G = 1000, +- DCEM_PORTSPEED_10G = 10000, +- DCEM_PORTSPEED_20G = 20000, +- DCEM_PORTSPEED_25G = 25000, +- DCEM_PORTSPEED_40G = 40000, +- DCEM_PORTSPEED_4x10G = 41000, +- DCEM_PORTSPEED_100G = 100000, +-}; + #endif /* _FNIC_IO_H_ */ +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33..3b5f11dce698 100644 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@ -41,6 +41,8 @@ + #include "fnic_io.h" + #include "fnic_fip.h" + #include "fnic.h" ++#include "fnic_fdls.h" ++#include "fdls_fc.h" + + #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 + +@@ -49,6 +51,8 @@ + + static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; + static struct kmem_cache *fnic_io_req_cache; ++static struct kmem_cache *fdls_frame_cache; ++static struct kmem_cache *fdls_frame_elem_cache; + static LIST_HEAD(fnic_list); + static DEFINE_SPINLOCK(fnic_list_lock); + static DEFINE_IDA(fnic_ida); +@@ -90,7 +94,6 @@ module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); + + static struct libfc_function_template fnic_transport_template = { +- .frame_send = fnic_send, + .lport_set_port_id = fnic_set_port_id, + .fcp_abort_io = fnic_empty_scsi_cleanup, + .fcp_cleanup = fnic_empty_scsi_cleanup, +@@ -428,7 +431,7 @@ static void fnic_fip_notify_timer(struct timer_list *t) + { + struct fnic *fnic = from_timer(fnic, t, fip_timer); + +- fnic_handle_fip_timer(fnic); 
++	/* Placeholder function */
+ }
+ 
+ static void fnic_notify_timer_start(struct fnic *fnic)
+@@ -530,6 +533,8 @@ static int fnic_cleanup(struct fnic *fnic)
+ 		vnic_intr_clean(&fnic->intr[i]);
+ 
+ 	mempool_destroy(fnic->io_req_pool);
++	mempool_destroy(fnic->frame_pool);
++	mempool_destroy(fnic->frame_elem_pool);
+ 	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
+ 		mempool_destroy(fnic->io_sgl_pool[i]);
+ 
+@@ -792,6 +797,17 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_out_free_dflt_pool;
+ 	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
+ 
++	pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache);
++	if (!pool)
++		goto err_out_fdls_frame_pool;
++	fnic->frame_pool = pool;
++
++	pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM,
++					fdls_frame_elem_cache);
++	if (!pool)
++		goto err_out_fdls_frame_elem_pool;
++	fnic->frame_elem_pool = pool;
++
+ 	/* setup vlan config, hw inserts vlan header */
+ 	fnic->vlan_hw_insert = 1;
+ 	fnic->vlan_id = 0;
+@@ -915,7 +931,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
+ 	INIT_WORK(&fnic->flush_work, fnic_flush_tx);
+ 	skb_queue_head_init(&fnic->frame_queue);
+-	skb_queue_head_init(&fnic->tx_queue);
++	INIT_LIST_HEAD(&fnic->tx_queue);
+ 
+ 	/* Enable all queues */
+ 	for (i = 0; i < fnic->raw_wq_count; i++)
+@@ -950,6 +966,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	for (i = 0; i < fnic->rq_count; i++)
+ 		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+ 	vnic_dev_notify_unset(fnic->vdev);
++	mempool_destroy(fnic->frame_elem_pool);
++err_out_fdls_frame_elem_pool:
++	mempool_destroy(fnic->frame_pool);
++err_out_fdls_frame_pool:
+ err_out_free_max_pool:
+ 	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
+ err_out_free_dflt_pool:
+@@ -986,6 +1006,14 @@ static void fnic_remove(struct pci_dev *pdev)
+ 	struct fc_lport *lp = fnic->lport;
+ 	unsigned long flags;
+ 
++	/*
++	 * Sometimes when probe() fails and does not exit with an error code,
++	 * remove() gets called with 'drvdata' not set. Avoid a crash by
++	 * adding a defensive check.
++	 */
++	if (!fnic)
++		return;
++
+ 	/*
+ 	 * Mark state so that the workqueue thread stops forwarding
+ 	 * received frames and link events to the local port. 
ISR and +@@ -1005,7 +1033,7 @@ static void fnic_remove(struct pci_dev *pdev) + */ + flush_workqueue(fnic_event_queue); + skb_queue_purge(&fnic->frame_queue); +- skb_queue_purge(&fnic->tx_queue); ++ fnic_free_txq(&fnic->tx_queue); + + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + del_timer_sync(&fnic->fip_timer); +@@ -1037,7 +1065,6 @@ static void fnic_remove(struct pci_dev *pdev) + fnic_cleanup(fnic); + + BUG_ON(!skb_queue_empty(&fnic->frame_queue)); +- BUG_ON(!skb_queue_empty(&fnic->tx_queue)); + + spin_lock_irqsave(&fnic_list_lock, flags); + list_del(&fnic->list); +@@ -1132,6 +1159,24 @@ static int __init fnic_init_module(void) + goto err_create_fnic_ioreq_slab; + } + ++ fdls_frame_cache = kmem_cache_create("fdls_frames", ++ FNIC_FCOE_FRAME_MAXSZ, ++ 0, SLAB_HWCACHE_ALIGN, NULL); ++ if (!fdls_frame_cache) { ++ pr_err("fnic fdls frame cache create failed\n"); ++ err = -ENOMEM; ++ goto err_create_fdls_frame_cache; ++ } ++ ++ fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem", ++ sizeof(struct fnic_frame_list), ++ 0, SLAB_HWCACHE_ALIGN, NULL); ++ if (!fdls_frame_elem_cache) { ++ pr_err("fnic fdls frame elem cache create failed\n"); ++ err = -ENOMEM; ++ goto err_create_fdls_frame_cache_elem; ++ } ++ + fnic_event_queue = + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq"); + if (!fnic_event_queue) { +@@ -1170,6 +1215,10 @@ static int __init fnic_init_module(void) + err_create_fip_workq: + destroy_workqueue(fnic_event_queue); + err_create_fnic_workq: ++ kmem_cache_destroy(fdls_frame_elem_cache); ++err_create_fdls_frame_cache_elem: ++ kmem_cache_destroy(fdls_frame_cache); ++err_create_fdls_frame_cache: + kmem_cache_destroy(fnic_io_req_cache); + err_create_fnic_ioreq_slab: + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); +@@ -1191,6 +1240,7 @@ static void __exit fnic_cleanup_module(void) + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); + kmem_cache_destroy(fnic_io_req_cache); ++ kmem_cache_destroy(fdls_frame_cache); + fc_release_transport(fnic_fc_transport); + fnic_trace_free(); + fnic_fc_trace_free(); +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c +index 321954ca143f..2556d10eb80f 100644 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@ -213,7 +213,7 @@ int fnic_fw_reset_handler(struct fnic *fnic) + fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); + + skb_queue_purge(&fnic->frame_queue); +- skb_queue_purge(&fnic->tx_queue); ++ fnic_free_txq(&fnic->tx_queue); + + /* wait for io cmpl */ + while (atomic_read(&fnic->in_flight)) +@@ -688,7 +688,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, + */ + if (fnic->remove_wait || ret) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +- skb_queue_purge(&fnic->tx_queue); ++ fnic_free_txq(&fnic->tx_queue); + goto reset_cmpl_handler_end; + } + +@@ -1648,7 +1648,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) + return true; + } + +-static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) ++void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) + { + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct fnic_rport_abort_io_iter_data iter_data = { diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a7510fbd.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a7510fbd.failed new file mode 100644 index 0000000000000..5b7373d8418c1 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a7510fbd.failed @@ 
-0,0 +1,39 @@ +scsi: fnic: Call scsi_done() directly + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Bart Van Assche +commit a7510fbd879e98baf2848b4646adbbd2b9d0fbb3 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a7510fbd.failed + +Conditional statements are faster than indirect calls. Hence call +scsi_done() directly. + +Link: https://lore.kernel.org/r/20211007202923.2174984-36-bvanassche@acm.org + Signed-off-by: Bart Van Assche + Signed-off-by: Martin K. Petersen +(cherry picked from commit a7510fbd879e98baf2848b4646adbbd2b9d0fbb3) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic_scsi.c +index f6aadfd9405d,09b8bf5adaf5..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -1052,6 -1048,9 +1051,12 @@@ static void fnic_fcpio_icmnd_cmpl_handl + if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time)) + atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time); + } +++<<<<<<< HEAD +++======= ++ ++ /* Call SCSI completion function to complete the IO */ ++ scsi_done(sc); +++>>>>>>> a7510fbd879e (scsi: fnic: Call scsi_done() directly) + } + + /* fnic_fcpio_itmf_cmpl_handler +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a8650a5e.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a8650a5e.failed new file mode 100644 index 0000000000000..a8ee5c47d003b --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a8650a5e.failed @@ -0,0 +1,595 @@ +scsi: fnic: Add stats and related functionality + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit a8650a5eaaf123572a7b2d6b1fe9f6b000b6b6a6 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/a8650a5e.failed + +Add statistics and related functionality for FDLS. + +Add supporting functions to display stats. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-13-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit a8650a5eaaf123572a7b2d6b1fe9f6b000b6b6a6) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fnic_main.c +# drivers/scsi/fnic/fnic_scsi.c +# drivers/scsi/fnic/fnic_stats.h +# drivers/scsi/fnic/fnic_trace.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,628c9e5902a2..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -165,22 -162,38 +165,34 @@@ static struct fc_function_template fnic + .show_starget_port_id = 1, + .show_rport_dev_loss_tmo = 1, + .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, +++<<<<<<< HEAD + + .issue_fc_host_lip = fnic_reset, +++======= ++ .issue_fc_host_lip = fnic_issue_fc_host_lip, +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + .get_fc_host_stats = fnic_get_stats, + .reset_fc_host_stats = fnic_reset_host_stats, + - .dd_fcrport_size = sizeof(struct rport_dd_data_s), + + .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .terminate_rport_io = fnic_terminate_rport_io, + - .bsg_request = NULL, + + .bsg_request = fc_lport_bsg_request, + }; + + static void fnic_get_host_speed(struct Scsi_Host *shost) + { + - struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + struct fc_lport *lp = shost_priv(shost); + + struct fnic *fnic = lport_priv(lp); + u32 port_speed = vnic_dev_port_speed(fnic->vdev); ++ struct fnic_stats *fnic_stats = &fnic->fnic_stats; + +++<<<<<<< HEAD +++======= ++ FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "port_speed: %d Mbps", port_speed); ++ atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed); ++ +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + /* Add in other values as they get defined in fw */ + switch (port_speed) { + - case DCEM_PORTSPEED_1G: + - fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + - break; + - case DCEM_PORTSPEED_2G: + - fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + - break; + - case DCEM_PORTSPEED_4G: + - fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + - break; + - case DCEM_PORTSPEED_8G: + - fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + - break; + case DCEM_PORTSPEED_10G: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; +@@@ -206,13 -236,14 +218,23 @@@ + static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) + { + int ret; +++<<<<<<< HEAD + + struct fc_lport *lp = shost_priv(host); + + struct fnic *fnic = lport_priv(lp); + + struct fc_host_statistics *stats = &lp->host_stats; + + struct vnic_stats *vs; + + unsigned long flags; + + + + if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) +++======= ++ struct fnic *fnic = *((struct fnic **) shost_priv(host)); ++ struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats; ++ struct vnic_stats *vs; ++ unsigned long flags; ++ ++ if (time_before ++ (jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + return stats; + fnic->stats_time = jiffies; + +@@@ -221,24 -252,22 +243,41 @@@ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { +++<<<<<<< HEAD + + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + + "fnic: Get vnic stats failed" + + " 0x%x", ret); +++======= ++ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, ++ "fnic: Get vnic stats failed: 0x%x", ret); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + return stats; + } + vs = fnic->stats; + stats->tx_frames = 
vs->tx.tx_unicast_frames_ok; +++<<<<<<< HEAD + + stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; + + stats->rx_frames = vs->rx.rx_unicast_frames_ok; + + stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; +++======= ++ stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; ++ stats->rx_frames = vs->rx.rx_unicast_frames_ok; ++ stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; + stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; + stats->invalid_crc_count = vs->rx.rx_crc_errors; + stats->seconds_since_last_reset = +++<<<<<<< HEAD + + (jiffies - fnic->stats_reset_time) / HZ; + + stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); + + stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); + + +++======= ++ (jiffies - fnic->stats_reset_time) / HZ; ++ stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); ++ stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + return stats; + } + +diff --cc drivers/scsi/fnic/fnic_scsi.c +index 321954ca143f,e464c677e9da..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -455,9 -488,9 +455,13 @@@ static int fnic_queuecommand_lck(struc + + ret = fc_remote_port_chkready(rport); + if (ret) { + - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "rport is not ready\n"); +++<<<<<<< HEAD + + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); +++======= ++ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + sc->result = ret; + done(sc); + return 0; +@@@ -1005,22 -1127,19 +1009,31 @@@ static void fnic_fcpio_icmnd_cmpl_handl + ((u64)icmnd_cmpl->_resvd0[1] << 56 | + (u64)icmnd_cmpl->_resvd0[0] << 48 | + jiffies_to_msecs(jiffies - start_time)), + - desc, cmd_trace, fnic_flags_and_state(sc)); + + desc, cmd_trace, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + + + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + + fnic->lport->host_stats.fcp_input_requests++; + + fnic->fcp_input_bytes += xfer_len; + + } else if (sc->sc_data_direction == DMA_TO_DEVICE) { + + fnic->lport->host_stats.fcp_output_requests++; + + fnic->fcp_output_bytes += xfer_len; + + } else + + fnic->lport->host_stats.fcp_control_requests++; + ++ if (sc->sc_data_direction == DMA_FROM_DEVICE) { ++ fnic_stats->host_stats.fcp_input_requests++; ++ fnic->fcp_input_bytes += xfer_len; ++ } else if (sc->sc_data_direction == DMA_TO_DEVICE) { ++ fnic_stats->host_stats.fcp_output_requests++; ++ fnic->fcp_output_bytes += xfer_len; ++ } else ++ fnic_stats->host_stats.fcp_control_requests++; ++ + /* Call SCSI completion function to complete the IO */ + - scsi_done(sc); + + if (sc->scsi_done) + + sc->scsi_done(sc); + + spin_unlock_irqrestore(io_lock, flags); + + mempool_free(io_req, fnic->io_req_pool); + +@@@ -1674,37 -1923,74 +1687,59 @@@ static void fnic_rport_exch_reset(struc + + void fnic_terminate_rport_io(struct fc_rport *rport) + { + - struct fnic_tport_s *tport; + - struct rport_dd_data_s *rdd_data; + - struct fnic_iport_s *iport = NULL; + - struct fnic *fnic = NULL; + + struct fc_rport_libfc_priv *rdata; + + struct fc_lport *lport; + + struct fnic *fnic; + + if (!rport) { + - pr_err("rport is NULL\n"); + + printk(KERN_ERR "fnic_terminate_rport_io: rport 
is NULL\n"); + return; + } + + rdata = rport->dd_data; + + - rdd_data = rport->dd_data; + - if (rdd_data) { + - tport = rdd_data->tport; + - if (!tport) { + - pr_err( + - "term rport io called after tport is deleted. Returning 0x%8x\n", + - rport->port_id); + - } else { + - pr_err( + - "term rport io called after tport is set 0x%8x\n", + - rport->port_id); + - pr_err( + - "tport maybe rediscovered\n"); + - + - iport = (struct fnic_iport_s *) tport->iport; + - fnic = iport->fnic; + - fnic_rport_exch_reset(fnic, rport->port_id); + - } + + if (!rdata) { + + printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); + + return; + } + -} + + lport = rdata->local_port; + + -/* + - * FCP-SCSI specific handling for module unload + - * + - */ + -void fnic_scsi_unload(struct fnic *fnic) + -{ + - unsigned long flags; + + if (!lport) { + + printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); + + return; + + } + + fnic = lport_priv(lport); + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, "fnic_terminate_rport_io called" + + " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", + + rport->port_name, rport->node_name, rport, + + rport->port_id); + + - /* + - * Mark state so that the workqueue thread stops forwarding + - * received frames and link events to the local port. ISR and + - * other threads that can queue work items will also stop + - * creating work items on the fnic workqueue + - */ + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->in_remove) + + return; + +++<<<<<<< HEAD + + fnic_rport_exch_reset(fnic, rport->port_id); +++======= ++ if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) ++ fnic_scsi_fcpio_reset(fnic); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ fnic->in_remove = 1; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ fnic_flush_tport_event_list(fnic); ++ fnic_delete_fcp_tports(fnic); ++ } ++ ++ void fnic_scsi_unload_cleanup(struct fnic *fnic) ++ { ++ int hwq = 0; ++ ++ fc_remove_host(fnic->lport->host); ++ scsi_remove_host(fnic->lport->host); ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) ++ kfree(fnic->sw_copy_wq[hwq].io_req_table); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + } + + /* +@@@ -1757,6 -2056,35 +1792,37 @@@ int fnic_abort_cmd(struct scsi_cmnd *sc + goto fnic_abort_cmd_end; + } + +++<<<<<<< HEAD +++======= ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x", ++ rport->port_id, sc->device->lun, hwq, mqtag); ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Op: 0x%x flags: 0x%x\n", ++ sc->cmnd[0], ++ fnic_priv(sc)->flags); ++ ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport NOT in READY state"); ++ ret = FAILED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_abort_cmd_end; ++ } ++ ++ if ((tport->state != FDLS_TGT_STATE_READY) && ++ (tport->state != FDLS_TGT_STATE_ADISC)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "tport state: %d\n", tport->state); ++ ret = FAILED; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_abort_cmd_end; ++ } ++ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + /* + * Avoid a race between SCSI 
issuing the abort and the device + * completing the command. +@@@ -1822,7 -2151,7 +1888,11 @@@ + if (fc_remote_port_chkready(rport) == 0) + task_req = FCPIO_ITMF_ABT_TASK; + else { +++<<<<<<< HEAD + + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); +++======= ++ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + task_req = FCPIO_ITMF_ABT_TASK_TERM; + } + +@@@ -2222,16 -2571,43 +2292,41 @@@ int fnic_device_reset(struct scsi_cmnd + atomic64_inc(&reset_stats->device_resets); + + rport = starget_to_rport(scsi_target(sc->device)); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + + "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n", + + rport->port_id, sc->device->lun, sc); + + - spin_lock_irqsave(&fnic->fnic_lock, flags); + - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + - "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n", + - rport->port_id, sc->device->lun, hwq, mqtag, + - fnic_priv(sc)->flags); + - + - rdd_data = rport->dd_data; + - tport = rdd_data->tport; + - if (!tport) { + - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + - "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n", + - rport->port_id, sc->device->lun); + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) + goto fnic_device_reset_end; +++<<<<<<< HEAD + + + + /* Check if remote port up */ + + if (fc_remote_port_chkready(rport)) { + + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); +++======= ++ } ++ ++ if (iport->state != FNIC_IPORT_STATE_READY) { ++ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "iport NOT in READY state"); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_device_reset_end; ++ } ++ ++ if ((tport->state != FDLS_TGT_STATE_READY) && ++ (tport->state != FDLS_TGT_STATE_ADISC)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "tport state: %d\n", tport->state); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ goto fnic_device_reset_end; ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ /* Check if remote port up */ ++ if (fc_remote_port_chkready(rport)) { ++ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + goto fnic_device_reset_end; + } + +@@@ -2707,3 -3016,76 +2802,79 @@@ int fnic_is_abts_pending(struct fnic *f + + return iter_data.ret; + } +++<<<<<<< HEAD +++======= ++ ++ /* ++ * SCSI Error handling calls driver's eh_host_reset if all prior ++ * error handling levels return FAILED. If host reset completes ++ * successfully, and if link is up, then Fabric login begins. ++ * ++ * Host Reset is the highest level of error recovery. If this fails, then ++ * host is offlined by SCSI. 
++ * ++ */ ++ int fnic_eh_host_reset_handler(struct scsi_cmnd *sc) ++ { ++ int ret = 0; ++ struct Scsi_Host *shost = sc->device->host; ++ struct fnic *fnic = *((struct fnic **) shost_priv(shost)); ++ ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, ++ "SCSI error handling: fnic host reset"); ++ ++ ret = fnic_host_reset(shost); ++ return ret; ++ } ++ ++ ++ void fnic_scsi_fcpio_reset(struct fnic *fnic) ++ { ++ unsigned long flags; ++ enum fnic_state old_state; ++ struct fnic_iport_s *iport = &fnic->iport; ++ DECLARE_COMPLETION_ONSTACK(fw_reset_done); ++ int time_remain; ++ ++ /* issue fw reset */ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { ++ /* fw reset is in progress, poll for its completion */ ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "fnic is in unexpected state: %d for fw_reset\n", ++ fnic->state); ++ return; ++ } ++ ++ old_state = fnic->state; ++ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; ++ ++ fnic_update_mac_locked(fnic, iport->hwmac); ++ fnic->fw_reset_done = &fw_reset_done; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Issuing fw reset\n"); ++ if (fnic_fw_reset_handler(fnic)) { ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) ++ fnic->state = old_state; ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ } else { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Waiting for fw completion\n"); ++ time_remain = wait_for_completion_timeout(&fw_reset_done, ++ msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT)); ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "Woken up after fw completion timeout\n"); ++ if (time_remain == 0) { ++ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, ++ "FW reset completion timed out after %d ms)\n", ++ FNIC_FW_RESET_TIMEOUT); ++ } ++ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); ++ } ++ fnic->fw_reset_done = NULL; ++ } +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) +diff --cc drivers/scsi/fnic/fnic_stats.h +index ca7ab8afa60a,8ddd20401a59..000000000000 +--- a/drivers/scsi/fnic/fnic_stats.h ++++ b/drivers/scsi/fnic/fnic_stats.h +@@@ -141,6 -171,5 +183,10 @@@ struct stats_debug_info + }; + + int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); +++<<<<<<< HEAD + +void fnic_stats_debugfs_init(struct fnic *); + +void fnic_stats_debugfs_remove(struct fnic *); +++======= ++ const char *fnic_role_to_str(unsigned int role); +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + #endif /* _FNIC_STATS_H_ */ +diff --cc drivers/scsi/fnic/fnic_trace.c +index a1d62546c584,420a25332cef..000000000000 +--- a/drivers/scsi/fnic/fnic_trace.c ++++ b/drivers/scsi/fnic/fnic_trace.c +@@@ -472,6 -467,60 +481,63 @@@ int fnic_get_stats_data(struct stats_de + + } + +++<<<<<<< HEAD +++======= ++ int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic) ++ { ++ struct fnic_iport_s *iport = &fnic->iport; ++ int buf_size = info->buf_size; ++ int len = info->buffer_len; ++ struct fnic_tport_s *tport, *next; ++ unsigned long flags; ++ ++ len += snprintf(info->debug_buffer + len, buf_size - len, ++ "------------------------------------------\n" ++ "\t\t Debug Info\n" ++ "------------------------------------------\n"); ++ len += snprintf(info->debug_buffer + len, buf_size - len, ++ "fnic 
Name:%s number:%d Role:%s State:%s\n", ++ fnic->name, fnic->fnic_num, ++ fnic_role_to_str(fnic->role), ++ fnic_state_to_str(fnic->state)); ++ len += ++ snprintf(info->debug_buffer + len, buf_size - len, ++ "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n", ++ iport->state, iport->flags, iport->vlan_id, iport->fcid); ++ len += ++ snprintf(info->debug_buffer + len, buf_size - len, ++ "usefip:%d fip_state:%d fip_flogi_retry:%d\n", ++ iport->usefip, iport->fip.state, iport->fip.flogi_retry); ++ len += ++ snprintf(info->debug_buffer + len, buf_size - len, ++ "fpma %02x:%02x:%02x:%02x:%02x:%02x", ++ iport->fpma[5], iport->fpma[4], iport->fpma[3], ++ iport->fpma[2], iport->fpma[1], iport->fpma[0]); ++ len += ++ snprintf(info->debug_buffer + len, buf_size - len, ++ "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n", ++ iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3], ++ iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]); ++ len += ++ snprintf(info->debug_buffer + len, buf_size - len, ++ "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n", ++ iport->fabric.state, iport->fabric.flags, ++ iport->fabric.retry_counter, iport->e_d_tov, ++ iport->r_a_tov); ++ ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ list_for_each_entry_safe(tport, next, &iport->tport_list, links) { ++ len += snprintf(info->debug_buffer + len, buf_size - len, ++ "tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n", ++ tport->fcid, tport->state, tport->flags, ++ atomic_read(&tport->in_flight), ++ tport->retry_counter); ++ } ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ return len; ++ } ++ +++>>>>>>> a8650a5eaaf1 (scsi: fnic: Add stats and related functionality) + /* + * fnic_trace_buf_init - Initialize fnic trace buffer logging facility + * +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h +index 5d78eea20873..3ad3afcf63d2 100644 +--- a/drivers/scsi/fnic/fnic_fdls.h ++++ b/drivers/scsi/fnic/fnic_fdls.h +@@ -305,10 +305,12 @@ struct fnic_iport_s { + uint16_t max_payload_size; + spinlock_t deleted_tport_lst_lock; + struct completion *flogi_reg_done; ++ struct fnic_iport_stats iport_stats; + char str_wwpn[20]; + char str_wwnn[20]; +- }; +- struct rport_dd_data_s { ++}; ++ ++struct rport_dd_data_s { + struct fnic_tport_s *tport; + struct fnic_iport_s *iport; + }; +* Unmerged path drivers/scsi/fnic/fnic_main.c +* Unmerged path drivers/scsi/fnic/fnic_scsi.c +* Unmerged path drivers/scsi/fnic/fnic_stats.h +* Unmerged path drivers/scsi/fnic/fnic_trace.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/b5a57f15.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/b5a57f15.failed new file mode 100644 index 0000000000000..ad60e7496c27b --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/b5a57f15.failed @@ -0,0 +1,117 @@ +scsi: fnic: Add support for target based solicited requests and responses + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit b5a57f153bdf772ed41ef286826cef7a1c52f433 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/b5a57f15.failed + +Add support for target based solicited requests and responses. + +Add support for tport definitions and processing. + +Add support for restarting the IT nexus. 
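In sketch form, this is the gate that solicited I/O ends up behind once tport state tracking exists — struct fnic_tport_s and the FDLS_TGT_STATE_* names come from the a8650a5e hunks earlier in this series, while the helper here is a hypothetical name for illustration, not part of the patch:

	/* Solicited requests are valid only once the IT nexus is READY,
	 * or while an ADISC is revalidating the nexus after a restart. */
	static bool fnic_tport_io_allowed(struct fnic_tport_s *tport)
	{
		return (tport->state == FDLS_TGT_STATE_READY) ||
		       (tport->state == FDLS_TGT_STATE_ADISC);
	}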
+ + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202406120146.xchlZbqX-lkp@intel.com/ +Closes: https://lore.kernel.org/oe-kbuild-all/202412081427.SlsFIJY4-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela +Co-developed-by: Gian Carlo Boffa + Signed-off-by: Gian Carlo Boffa +Co-developed-by: Arulprabhu Ponnusamy + Signed-off-by: Arulprabhu Ponnusamy +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-5-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit b5a57f153bdf772ed41ef286826cef7a1c52f433) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +# drivers/scsi/fnic/fnic.h +diff --cc drivers/scsi/fnic/fnic.h +index 73fb8245c7b7,1676bd8324fc..000000000000 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@@ -88,16 -82,34 +88,24 @@@ + #define FNIC_DEV_RST_TERM_DONE BIT(20) + #define FNIC_DEV_RST_ABTS_PENDING BIT(21) + +++<<<<<<< HEAD +++======= ++ #define IS_FNIC_FCP_INITIATOR(fnic) (fnic->role == FNIC_ROLE_FCP_INITIATOR) ++ ++ /* Retry supported by rport (returned by PRLI service parameters) */ ++ #define FNIC_FC_RP_FLAGS_RETRY 0x1 ++ +++>>>>>>> b5a57f153bdf (scsi: fnic: Add support for target based solicited requests and responses) + /* + - * fnic private data per SCSI command. + + * Usage of the scsi_cmnd scratchpad. + * These fields are locked by the hashed io_req_lock. + */ + -struct fnic_cmd_priv { + - struct fnic_io_req *io_req; + - enum fnic_ioreq_state state; + - u32 flags; + - u16 abts_status; + - u16 lr_status; + -}; + - + -static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) + -{ + - return scsi_cmd_priv(cmd); + -} + - + -static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) + -{ + - struct fnic_cmd_priv *fcmd = fnic_priv(cmd); + - + - return ((u64)fcmd->flags << 32) | fcmd->state; + -} + +#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) + +#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) + +#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) + +#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) + +#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) + +#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) + + #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ + +@@@ -299,7 -337,11 +308,15 @@@ struct fnic + struct work_struct frame_work; + struct work_struct flush_work; + struct sk_buff_head frame_queue; +++<<<<<<< HEAD + + struct sk_buff_head tx_queue; +++======= ++ struct list_head tx_queue; ++ mempool_t *frame_pool; ++ mempool_t *frame_elem_pool; ++ struct work_struct tport_work; ++ struct list_head tport_event_list; +++>>>>>>> b5a57f153bdf (scsi: fnic: Add support for target based solicited requests and responses) + + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fnic.h +diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h +index 5d78eea20873..1adab07eb742 100644 +--- a/drivers/scsi/fnic/fnic_fdls.h ++++ b/drivers/scsi/fnic/fnic_fdls.h +@@ -388,7 +388,7 @@ void fdls_send_fabric_logo(struct fnic_iport_s *iport); + int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr); + void fdls_send_tport_abts(struct fnic_iport_s *iport, +- struct fnic_tport_s *tport); ++ struct fnic_tport_s *tport); + 
bool fdls_delete_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); + void fdls_fdmi_timer_callback(struct timer_list *t); diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bab8551e.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bab8551e.failed new file mode 100644 index 0000000000000..cafc26072334b --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bab8551e.failed @@ -0,0 +1,27 @@ +scsi: fnic: Remove unnecessary else to fix warning in FDLS FIP + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit bab8551e33f7f5e8743ccb49be41fe3228178e8a +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bab8551e.failed + +Implement review comments from Martin: + Remove unnecessary else from fip.c to fix a warning. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250106224451.3597-3-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit bab8551e33f7f5e8743ccb49be41fe3228178e8a) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fip.c +* Unmerged path drivers/scsi/fnic/fip.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bd067766.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bd067766.failed new file mode 100644 index 0000000000000..4be9742a80b03 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bd067766.failed @@ -0,0 +1,26 @@ +scsi: fnic: Remove unnecessary NUL-terminations + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Thorsten Blum +commit bd067766ee2aeb35589ad74d599b0e6311f68c73 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/bd067766.failed + +strscpy_pad() already NUL-terminates 'data' at the corresponding +indexes. Remove any unnecessary NUL-terminations. + +No functional changes intended. + + Signed-off-by: Thorsten Blum +Link: https://lore.kernel.org/r/20250314221626.43174-2-thorsten.blum@linux.dev + Reviewed-by: Karan Tilak Kumar + Signed-off-by: Martin K. Petersen +(cherry picked from commit bd067766ee2aeb35589ad74d599b0e6311f68c73) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c353e898.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c353e898.failed new file mode 100644 index 0000000000000..b798e56ae6ecc --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c353e898.failed @@ -0,0 +1,286 @@ +net: introduce per netns packet chains + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Paolo Abeni +commit c353e8983e0dea5dbba7789033326e1ad34135b7 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. 
Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c353e898.failed + +Currently network taps unbound to any interface are linked in the +global ptype_all list, affecting the performance in all the network +namespaces. + +Add per netns ptypes chains, so that in the mentioned case only +the netns owning the packet socket(s) is affected. + +While at that drop the global ptype_all list: no in kernel user +registers a tap on "any" type without specifying either the target +device or the target namespace (and IMHO doing that would not make +any sense). + +Note that this adds a conditional in the fast path (to check for +per netns ptype_specific list) and increases the dataset size by +a cacheline (owing the per netns lists). + + Reviewed-by: Sabrina Dubroca + Signed-off-by: Paolo Abeni + Reviewed-by: Eric Dumazet +Link: https://patch.msgid.link/ae405f98875ee87f8150c460ad162de7e466f8a7.1742494826.git.pabeni@redhat.com + Signed-off-by: Jakub Kicinski +(cherry picked from commit c353e8983e0dea5dbba7789033326e1ad34135b7) + Signed-off-by: Jonathan Maple + +# Conflicts: +# include/net/hotdata.h +# net/core/dev.c +# net/core/hotdata.c +# net/core/net-procfs.c +# net/core/net_namespace.c +diff --cc net/core/dev.c +index d16786a809da,bcf81c3ff6a3..000000000000 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@@ -569,10 -572,18 +569,25 @@@ static inline void netdev_set_addr_lock + + static inline struct list_head *ptype_head(const struct packet_type *pt) + { +++<<<<<<< HEAD + + if (pt->type == htons(ETH_P_ALL)) + + return pt->dev ? &pt->dev->ptype_all : &ptype_all; + + else + + return pt->dev ? &pt->dev->ptype_specific : +++======= ++ if (pt->type == htons(ETH_P_ALL)) { ++ if (!pt->af_packet_net && !pt->dev) ++ return NULL; ++ ++ return pt->dev ? &pt->dev->ptype_all : ++ &pt->af_packet_net->ptype_all; ++ } ++ ++ if (pt->dev) ++ return &pt->dev->ptype_specific; ++ ++ return pt->af_packet_net ? &pt->af_packet_net->ptype_specific : +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; + } + +@@@ -2305,11 -2461,15 +2328,19 @@@ static inline bool skb_loop_sk(struct p + * + * @dev: network device to check for the presence of taps + */ +- bool dev_nit_active(struct net_device *dev) ++ bool dev_nit_active_rcu(const struct net_device *dev) + { +++<<<<<<< HEAD + + return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); +++======= ++ /* Callers may hold either RCU or RCU BH lock */ ++ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); ++ ++ return !list_empty(&dev_net(dev)->ptype_all) || ++ !list_empty(&dev->ptype_all); +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + } +- EXPORT_SYMBOL_GPL(dev_nit_active); ++ EXPORT_SYMBOL_GPL(dev_nit_active_rcu); + + /* + * Support routine. 
Sends outgoing frames to any network +@@@ -2318,12 -2478,12 +2349,18 @@@ + + void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) + { +++<<<<<<< HEAD + + struct packet_type *ptype; +++======= ++ struct packet_type *ptype, *pt_prev = NULL; ++ struct list_head *ptype_list; +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + struct sk_buff *skb2 = NULL; + + struct packet_type *pt_prev = NULL; + + struct list_head *ptype_list = &ptype_all; + + rcu_read_lock(); ++ ptype_list = &dev_net_rcu(dev)->ptype_all; + again: + list_for_each_entry_rcu(ptype, ptype_list, list) { + if (READ_ONCE(ptype->ignore_outgoing)) +@@@ -2367,7 -2527,7 +2404,11 @@@ + pt_prev = ptype; + } + +++<<<<<<< HEAD + + if (ptype_list == &ptype_all) { +++======= ++ if (ptype_list != &dev->ptype_all) { +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + ptype_list = &dev->ptype_all; + goto again; + } +@@@ -5445,7 -5716,8 +5486,12 @@@ another_round + if (pfmemalloc) + goto skip_taps; + +++<<<<<<< HEAD + + list_for_each_entry_rcu(ptype, &ptype_all, list) { +++======= ++ list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all, ++ list) { +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + if (pt_prev) + ret = deliver_skb(skb, pt_prev, orig_dev); + pt_prev = ptype; +diff --cc net/core/net-procfs.c +index f6aa2f227416,3e92bf0f9060..000000000000 +--- a/net/core/net-procfs.c ++++ b/net/core/net-procfs.c +@@@ -175,7 -185,13 +175,17 @@@ static void *ptype_get_idx(struct seq_f + } + } + +++<<<<<<< HEAD + + list_for_each_entry_rcu(pt, &ptype_all, list) { +++======= ++ list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) { ++ if (i == pos) ++ return pt; ++ ++i; ++ } ++ ++ list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_specific, list) { +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + if (i == pos) + return pt; + ++i; +@@@ -222,15 -239,22 +233,33 @@@ static void *ptype_seq_next(struct seq_ + goto found; + } + } +++<<<<<<< HEAD + + + + nxt = ptype_all.next; + + goto ptype_all; + + } + + + + if (pt->type == htons(ETH_P_ALL)) { + +ptype_all: + + if (nxt != &ptype_all) +++======= ++ nxt = net->ptype_all.next; ++ goto net_ptype_all; ++ } ++ ++ if (pt->af_packet_net) { ++ net_ptype_all: ++ if (nxt != &net->ptype_all && nxt != &net->ptype_specific) +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + goto found; ++ ++ if (nxt == &net->ptype_all) { ++ /* continue with ->ptype_specific if it's not empty */ ++ nxt = net->ptype_specific.next; ++ if (nxt != &net->ptype_specific) ++ goto found; ++ } ++ + hash = 0; + nxt = ptype_base[0].next; + } else +diff --cc net/core/net_namespace.c +index 9fe7ae5e1863,b0dfdf791ece..000000000000 +--- a/net/core/net_namespace.c ++++ b/net/core/net_namespace.c +@@@ -307,10 -308,41 +307,30 @@@ struct net *get_net_ns_by_id(const stru + } + EXPORT_SYMBOL_GPL(get_net_ns_by_id); + + -static __net_init void preinit_net_sysctl(struct net *net) + -{ + - net->core.sysctl_somaxconn = SOMAXCONN; + - /* Limits per socket sk_omem_alloc usage. + - * TCP zerocopy regular usage needs 128 KB. + - */ + - net->core.sysctl_optmem_max = 128 * 1024; + - net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED; + - net->core.sysctl_tstamp_allow_data = 1; + -} + - + /* init code that must occur even if setup_net() is not called. 
*/ + -static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns) + +static __net_init void preinit_net(struct net *net) + { + - refcount_set(&net->passive, 1); + - refcount_set(&net->ns.count, 1); + - ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt"); + ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt"); +++<<<<<<< HEAD +++======= ++ ++ get_random_bytes(&net->hash_mix, sizeof(u32)); ++ net->dev_base_seq = 1; ++ net->user_ns = user_ns; ++ ++ idr_init(&net->netns_ids); ++ spin_lock_init(&net->nsid_lock); ++ mutex_init(&net->ipv4.ra_mutex); ++ ++ #ifdef CONFIG_DEBUG_NET_SMALL_RTNL ++ mutex_init(&net->rtnl_mutex); ++ lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL); ++ #endif ++ ++ INIT_LIST_HEAD(&net->ptype_all); ++ INIT_LIST_HEAD(&net->ptype_specific); ++ preinit_net_sysctl(net); +++>>>>>>> c353e8983e0d (net: introduce per netns packet chains) + } + + /* +* Unmerged path include/net/hotdata.h +* Unmerged path net/core/hotdata.c +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 7ec2fece55e2..2bdc4e008c41 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -4157,7 +4157,17 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, + return 0; + } + +-bool dev_nit_active(struct net_device *dev); ++bool dev_nit_active_rcu(const struct net_device *dev); ++static inline bool dev_nit_active(const struct net_device *dev) ++{ ++ bool ret; ++ ++ rcu_read_lock(); ++ ret = dev_nit_active_rcu(dev); ++ rcu_read_unlock(); ++ return ret; ++} ++ + void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); + + static inline void __dev_put(struct net_device *dev) +* Unmerged path include/net/hotdata.h +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index 555688672e67..c8333556d2c0 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -83,6 +83,9 @@ struct net { + struct llist_node defer_free_list; + struct llist_node cleanup_list; /* namespaces on death row */ + ++ struct list_head ptype_all; ++ struct list_head ptype_specific; ++ + #ifdef CONFIG_KEYS + struct key_tag *key_domain; /* Key domain of operation tag */ + #endif +* Unmerged path net/core/dev.c +* Unmerged path net/core/hotdata.c +* Unmerged path net/core/net-procfs.c +* Unmerged path net/core/net_namespace.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c81df08c.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c81df08c.failed new file mode 100644 index 0000000000000..75cff7f28cb95 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c81df08c.failed @@ -0,0 +1,1486 @@ +scsi: fnic: Add support for multiqueue (MQ) in fnic driver + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit c81df08cd2944f89921033e5f1744ae2960f4e69 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/c81df08c.failed + +Implement support for MQ in fnic driver: + +The block multiqueue layer issues IO to the fnic driver with an MQ tag. Use +the mqtag and derive a tag from it. Derive the hardware queue from the +mqtag and use it in all paths. Modify queuecommand to handle mqtag. + +Replace wq and cq indices to support MQ. Replace the zeroth queue with a +hardware queue. Implement spin locks on a per hardware queue basis. 
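The tag math, in sketch form — it assumes only the blk-mq helpers the hunks below actually use (blk_mq_unique_tag(), blk_mq_unique_tag_to_hwq(), blk_mq_unique_tag_to_tag()) plus the sw_copy_wq/wq_copy_lock fields this commit adds; fnic_io_req_lookup() itself is a hypothetical name, not part of the patch:

	#include <linux/blk-mq.h>	/* also assumes the driver's "fnic.h" */

	/* blk_mq_unique_tag() packs (hwq << BLK_MQ_UNIQUE_TAG_BITS) | tag. */
	static struct fnic_io_req *fnic_io_req_lookup(struct fnic *fnic,
						      struct request *rq)
	{
		u32 mqtag = blk_mq_unique_tag(rq);
		u16 hwq = blk_mq_unique_tag_to_hwq(mqtag);	/* hardware queue */
		u16 tag = blk_mq_unique_tag_to_tag(mqtag);	/* per-queue tag */
		struct fnic_io_req *io_req;
		unsigned long flags;

		/* One spinlock per hardware queue replaces the old hashed io_lock. */
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		io_req = fnic->sw_copy_wq[hwq].io_req_table[tag];
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return io_req;
	}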
+Replace io_lock with per hardware queue spinlock. Implement out of range +tag checks. + +Allocate an io_req_table to track status of the io_req. + +Test the driver by building it, loading it, and configuring 64 queues in +UCSM. Issue IOs using Medusa on multiple fnics. Enable/disable links to +exercise the abort and clean up path. + + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202310300032.2awCqkfn-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Tested-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20231211173617.932990-12-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit c81df08cd2944f89921033e5f1744ae2960f4e69) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +# drivers/scsi/fnic/fnic_scsi.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 85e3a51a16fc,5ed1d897311a..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -774,9 -794,6 +774,12 @@@ static int fnic_probe(struct pci_dev *p + fnic->fw_ack_index[i] = -1; + } + +++<<<<<<< HEAD + + for (i = 0; i < FNIC_IO_LOCKS; i++) + + spin_lock_init(&fnic->io_req_lock[i]); + + +++======= +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + err = -ENOMEM; + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) +diff --cc drivers/scsi/fnic/fnic_scsi.c +index a9f65dc3f089,42807e89859c..000000000000 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@@ -414,15 -383,11 +398,22 @@@ static inline int fnic_queue_wq_copy_de + return 0; + } + +++<<<<<<< HEAD + +/* + + * fnic_queuecommand + + * Routine to send a scsi cdb + + * Called with host_lock held and interrupts disabled. + + */ + +static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) + +{ + + const int tag = scsi_cmd_to_rq(sc)->tag; +++======= ++ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) ++ { ++ struct request *const rq = scsi_cmd_to_rq(sc); ++ uint32_t mqtag = 0; ++ void (*done)(struct scsi_cmnd *) = scsi_done; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + struct fc_lport *lp = shost_priv(sc->device->host); + struct fc_rport *rport; + struct fnic_io_req *io_req = NULL; +@@@ -434,15 -399,28 +425,26 @@@ + int sg_count = 0; + unsigned long flags = 0; + unsigned long ptr; +- spinlock_t *io_lock = NULL; + int io_lock_acquired = 0; + struct fc_rport_libfc_priv *rp; ++ uint16_t hwq = 0; + +++<<<<<<< HEAD + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) +++======= ++ mqtag = blk_mq_unique_tag(rq); ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ ++ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "fnic<%d>: %s: %d: fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", ++ fnic->fnic_num, __func__, __LINE__, fnic->state_flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + return SCSI_MLQUEUE_HOST_BUSY; + - } + + - if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, + - "fnic<%d>: %s: %d: fnic flags: 0x%lx. 
Returning SCSI_MLQUEUE_HOST_BUSY\n", + - fnic->fnic_num, __func__, __LINE__, fnic->state_flags); + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + - } + + rport = starget_to_rport(scsi_target(sc->device)); + if (!rport) { +@@@ -512,7 -494,7 +514,11 @@@ + sg_count = scsi_dma_map(sc); + if (sg_count < 0) { + FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, +++<<<<<<< HEAD + + tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc)); +++======= ++ mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + mempool_free(io_req, fnic->io_req_pool); + goto out; + } +@@@ -557,26 -538,37 +562,53 @@@ + io_lock_acquired = 1; + io_req->port_id = rport->port_id; + io_req->start_time = jiffies; +++<<<<<<< HEAD + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_SP(sc) = (char *)io_req; + + CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED; + + sc->scsi_done = done; +++======= ++ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; ++ fnic_priv(sc)->io_req = io_req; ++ fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; ++ io_req->sc = sc; ++ ++ if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) { ++ WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n", ++ fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag)); ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } ++ ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req; ++ io_req->tag = mqtag; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + /* create copy wq desc and enqueue it */ +- wq = &fnic->hw_copy_wq[0]; +- ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); ++ wq = &fnic->hw_copy_wq[hwq]; ++ atomic64_inc(&fnic_stats->io_stats.ios[hwq]); ++ ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq); + if (ret) { + /* + * In case another thread cancelled the request, + * refetch the pointer under the lock. 
+ */ + FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, +++<<<<<<< HEAD + + tag, sc, 0, 0, 0, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + CMD_SP(sc) = NULL; + + CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ mqtag, sc, 0, 0, 0, fnic_flags_and_state(sc)); ++ io_req = fnic_priv(sc)->io_req; ++ fnic_priv(sc)->io_req = NULL; ++ if (io_req) ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL; ++ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (io_req) { + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); +@@@ -603,16 -593,14 +635,21 @@@ out + sc->cmnd[5]); + + FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, +++<<<<<<< HEAD + + tag, sc, io_req, sg_count, cmd_trace, + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); +++======= ++ mqtag, sc, io_req, sg_count, cmd_trace, ++ fnic_flags_and_state(sc)); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + /* if only we issued IO, will we have the io lock */ + if (io_lock_acquired) +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + atomic_dec(&fnic->in_flight); + + /* acquire host lock before returning to SCSI */ + + spin_lock(lp->host->host_lock); + return ret; + } + +@@@ -865,14 -875,19 +924,30 @@@ static void fnic_fcpio_icmnd_cmpl_handl + return; + } + +++<<<<<<< HEAD + + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + WARN_ON_ONCE(!io_req); + + if (!io_req) { + + atomic64_inc(&fnic_stats->io_stats.ioreq_null); + + CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ io_req = fnic_priv(sc)->io_req; ++ if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) { ++ WARN(1, "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x io_req tag mismatch\n", ++ __func__, __LINE__, hwq, mqtag, tag); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ return; ++ } ++ ++ WARN_ON_ONCE(!io_req); ++ if (!io_req) { ++ atomic64_inc(&fnic_stats->io_stats.ioreq_null); ++ fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + shost_printk(KERN_ERR, fnic->lport->host, + "icmnd_cmpl io_req is null - " + "hdr status = %s tag = 0x%x sc 0x%p\n", +@@@ -894,11 -909,11 +969,17 @@@ + * set the FNIC_IO_DONE so that this doesn't get + * flagged as 'out of order' if it was not aborted + */ +++<<<<<<< HEAD + + CMD_FLAGS(sc) |= FNIC_IO_DONE; + + CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ fnic_priv(sc)->flags |= FNIC_IO_DONE; ++ fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if(FCPIO_ABORTED == hdr_status) + - fnic_priv(sc)->flags |= FNIC_IO_ABORTED; + + CMD_FLAGS(sc) |= FNIC_IO_ABORTED; + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "icmnd_cmpl abts pending " +@@@ -983,8 -998,12 +1064,17 @@@ + } + + /* Break link with the SCSI command */ +++<<<<<<< HEAD + + CMD_SP(sc) = NULL; + + CMD_FLAGS(sc) |= FNIC_IO_DONE; +++======= 
++ fnic_priv(sc)->io_req = NULL; ++ io_req->sc = NULL; ++ fnic_priv(sc)->flags |= FNIC_IO_DONE; ++ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; ++ ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + if (hdr_status != FCPIO_SUCCESS) { + atomic64_inc(&fnic_stats->io_stats.io_failures); +@@@ -1018,9 -1036,7 +1108,13 @@@ + fnic->lport->host_stats.fcp_control_requests++; + + /* Call SCSI completion function to complete the IO */ +++<<<<<<< HEAD + + if (sc->scsi_done) + + sc->scsi_done(sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ scsi_done(sc); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + mempool_free(io_req, fnic->io_req_pool); + +@@@ -1071,36 -1087,77 +1165,102 @@@ static void fnic_fcpio_itmf_cmpl_handle + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + unsigned long flags; +- spinlock_t *io_lock; + unsigned long start_time; +++<<<<<<< HEAD +++======= ++ unsigned int hwq = cq_index; ++ unsigned int mqtag; ++ unsigned int tag; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + - fcpio_tag_id_dec(&ftag, &id); + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + + fcpio_tag_id_dec(&tag, &id); + + +++<<<<<<< HEAD + + if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { + + shost_printk(KERN_ERR, fnic->lport->host, + + "Tag out of range tag %x hdr status = %s\n", + + id, fnic_fcpio_status_to_str(hdr_status)); + + return; + + } + + + sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); +++======= ++ mqtag = id & FNIC_TAG_MASK; ++ tag = blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK); ++ hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK); ++ ++ if (hwq != cq_index) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ __func__, __LINE__, hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "%s: %d: hdr status: %s ITMF completion on the wrong queue\n", ++ __func__, __LINE__, ++ fnic_fcpio_status_to_str(hdr_status)); ++ } ++ ++ if (tag > fnic->fnic_max_tag_id) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ __func__, __LINE__, hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "%s: %d: hdr status: %s Tag out of range\n", ++ __func__, __LINE__, ++ fnic_fcpio_status_to_str(hdr_status)); ++ return; ++ } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", ++ __func__, __LINE__, hwq, mqtag, tag, cq_index); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "%s: %d: hdr status: %s Tag out of range\n", ++ __func__, __LINE__, ++ fnic_fcpio_status_to_str(hdr_status)); ++ return; ++ } ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ ++ /* If it is sg3utils allocated SC then tag_id ++ * is max_tag_id and SC is retrieved from io_req ++ */ ++ if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { ++ io_req = fnic->sw_copy_wq[hwq].io_req_table[tag]; ++ if (io_req) ++ sc = io_req->sc; ++ } else { ++ sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); ++ } ++ +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + 
WARN_ON_ONCE(!sc); + if (!sc) { + atomic64_inc(&fnic_stats->io_stats.sc_null); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + shost_printk(KERN_ERR, fnic->lport->host, + "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", + - fnic_fcpio_status_to_str(hdr_status), tag); + + fnic_fcpio_status_to_str(hdr_status), id); + return; + } +++<<<<<<< HEAD + + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + WARN_ON_ONCE(!io_req); + + if (!io_req) { + + atomic64_inc(&fnic_stats->io_stats.ioreq_null); + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; +++======= ++ ++ io_req = fnic_priv(sc)->io_req; ++ WARN_ON_ONCE(!io_req); ++ if (!io_req) { ++ atomic64_inc(&fnic_stats->io_stats.ioreq_null); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + shost_printk(KERN_ERR, fnic->lport->host, + "itmf_cmpl io_req is null - " + "hdr status = %s tag = 0x%x sc 0x%p\n", +@@@ -1115,12 -1172,12 +1275,12 @@@ + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "dev reset abts cmpl recd. id %x status %s\n", + id, fnic_fcpio_status_to_str(hdr_status)); + - fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; + - fnic_priv(sc)->abts_status = hdr_status; + - fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + + CMD_ABTS_STATUS(sc) = hdr_status; + + CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; + if (io_req->abts_done) + complete(io_req->abts_done); +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + } else if (id & FNIC_TAG_ABORT) { + /* Completion of abort cmd */ + switch (hdr_status) { +@@@ -1153,9 -1210,9 +1313,9 @@@ + &term_stats->terminate_failures); + break; + } + - if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) { + + if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { + /* This is a late completion. 
Ignore it */ +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + return; + } + +@@@ -1185,41 -1242,37 +1345,48 @@@ + } else { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "abts cmpl, completing IO\n"); + - fnic_priv(sc)->io_req = NULL; + + CMD_SP(sc) = NULL; + sc->result = (DID_ERROR << 16); +- +- spin_unlock_irqrestore(io_lock, flags); ++ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + - FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + - sc->device->host->host_no, id, + - sc, + - jiffies_to_msecs(jiffies - start_time), + - desc, + - (((u64)hdr_status << 40) | + - (u64)sc->cmnd[0] << 32 | + - (u64)sc->cmnd[2] << 24 | + - (u64)sc->cmnd[3] << 16 | + - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + - fnic_flags_and_state(sc)); + - scsi_done(sc); + - atomic64_dec(&fnic_stats->io_stats.active_ios); + - if (atomic64_read(&fnic->io_cmpl_skip)) + - atomic64_dec(&fnic->io_cmpl_skip); + - else + - atomic64_inc(&fnic_stats->io_stats.io_completions); + + if (sc->scsi_done) { + + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + + sc->device->host->host_no, id, + + sc, + + jiffies_to_msecs(jiffies - start_time), + + desc, + + (((u64)hdr_status << 40) | + + (u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | + + (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | + + CMD_STATE(sc))); + + sc->scsi_done(sc); + + atomic64_dec(&fnic_stats->io_stats.active_ios); + + if (atomic64_read(&fnic->io_cmpl_skip)) + + atomic64_dec(&fnic->io_cmpl_skip); + + else + + atomic64_inc(&fnic_stats->io_stats.io_completions); + + } + } + + + } else if (id & FNIC_TAG_DEV_RST) { + /* Completion of device reset */ +++<<<<<<< HEAD + + CMD_LR_STATUS(sc) = hdr_status; + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING; +++======= ++ fnic_priv(sc)->lr_status = hdr_status; ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), +@@@ -1232,9 -1284,9 +1399,9 @@@ + fnic_fcpio_status_to_str(hdr_status)); + return; + } + - if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { + + if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) { + /* Need to wait for terminate completion */ +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), +@@@ -1260,8 -1311,8 +1427,13 @@@ + } else { + shost_printk(KERN_ERR, fnic->lport->host, + "Unexpected itmf io state %s tag %x\n", +++<<<<<<< HEAD + + fnic_ioreq_state_to_str(CMD_STATE(sc)), id); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ fnic_ioreq_state_to_str(fnic_priv(sc)->state), id); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + } + + } +@@@ -1355,16 -1408,31 +1529,37 @@@ static bool fnic_cleanup_io_iter(struc + struct fnic *fnic = data; + struct fnic_io_req *io_req; + unsigned long flags = 0; 
+- spinlock_t *io_lock; + unsigned long start_time = 0; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; ++ uint16_t hwq = 0; ++ int tag; ++ int mqtag; + +- io_lock = fnic_io_lock_tag(fnic, tag); +- spin_lock_irqsave(io_lock, flags); ++ mqtag = blk_mq_unique_tag(rq); ++ hwq = blk_mq_unique_tag_to_hwq(mqtag); ++ tag = blk_mq_unique_tag_to_tag(mqtag); + ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ ++ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; ++ +++<<<<<<< HEAD + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { +++======= ++ io_req = fnic_priv(sc)->io_req; ++ if (!io_req) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, ++ "fnic<%d>: %s: %d: hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n", ++ fnic->fnic_num, __func__, __LINE__, hwq, mqtag, tag, fnic_priv(sc)->flags); ++ return true; ++ } ++ ++ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && ++ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + /* + * We will be here only when FW completes reset + * without sending completions for outstanding ios. +@@@ -1374,20 -1442,16 +1569,27 @@@ + complete(io_req->dr_done); + else if (io_req && io_req->abts_done) + complete(io_req->abts_done); +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + return true; +++<<<<<<< HEAD + + } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + return true; + } +- if (!io_req) { +- spin_unlock_irqrestore(io_lock, flags); +- goto cleanup_scsi_cmd; +- } + +++<<<<<<< HEAD + + CMD_SP(sc) = NULL; + + + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ fnic_priv(sc)->io_req = NULL; ++ io_req->sc = NULL; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + /* + * If there is a scsi_cmnd associated with this io_req, then +@@@ -1408,24 -1471,17 +1609,37 @@@ + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + +++<<<<<<< HEAD + + /* Complete the command to SCSI */ + + if (sc->scsi_done) { + + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) + + shost_printk(KERN_ERR, fnic->lport->host, + + "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", + + tag, sc); + + + + FNIC_TRACE(fnic_cleanup_io, + + sc->device->host->host_no, tag, sc, + + jiffies_to_msecs(jiffies - start_time), + + 0, ((u64)sc->cmnd[0] << 32 | + + (u64)sc->cmnd[2] << 24 | + + (u64)sc->cmnd[3] << 16 | + + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); +++======= ++ FNIC_TRACE(fnic_cleanup_io, ++ sc->device->host->host_no, tag, sc, ++ jiffies_to_msecs(jiffies - start_time), ++ 0, ((u64)sc->cmnd[0] << 32 | ++ (u64)sc->cmnd[2] << 24 | ++ (u64)sc->cmnd[3] << 16 | ++ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), ++ fnic_flags_and_state(sc)); ++ ++ scsi_done(sc); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + + sc->scsi_done(sc); + + } + return true; + } + +@@@ -1457,11 -1513,11 +1671,11 @@@ void fnic_wq_copy_cleanup_handler(struc + if (!sc) + return; + +- io_lock = 
fnic_io_lock_hash(fnic, sc); +- spin_lock_irqsave(io_lock, flags); ++ hwq = blk_mq_unique_tag_to_hwq(id); ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + /* Get the IO context which this desc refers to */ + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + /* fnic interrupts are turned off by now */ + +@@@ -1470,9 -1526,11 +1684,15 @@@ + goto wq_copy_cleanup_scsi_cmd; + } + +++<<<<<<< HEAD + + CMD_SP(sc) = NULL; +++======= ++ fnic_priv(sc)->io_req = NULL; ++ io_req->sc = NULL; ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + start_time = io_req->start_time; + fnic_release_ioreq_buf(fnic, io_req, sc); +@@@ -1498,10 -1554,10 +1718,15 @@@ wq_copy_cleanup_scsi_cmd + + static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, + u32 task_req, u8 *fc_lun, +- struct fnic_io_req *io_req) ++ struct fnic_io_req *io_req, ++ unsigned int hwq) + { +++<<<<<<< HEAD + + struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; + + struct Scsi_Host *host = fnic->lport->host; +++======= ++ struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq]; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + unsigned long flags; + +@@@ -1512,15 -1568,15 +1737,15 @@@ + return 1; + } else + atomic_inc(&fnic->in_flight); + - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_unlock_irqrestore(host->host_lock, flags); + +- spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + +- if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) +- free_wq_copy_descs(fnic, wq); ++ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) ++ free_wq_copy_descs(fnic, wq, hwq); + + if (!vnic_wq_copy_desc_avail(wq)) { +- spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + atomic_dec(&fnic->in_flight); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_queue_abort_io_req: failure: no descriptors\n"); +@@@ -1561,14 -1617,17 +1786,17 @@@ static bool fnic_rport_abort_io_iter(st + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct scsi_lun fc_lun; + enum fnic_ioreq_state old_ioreq_state; ++ uint16_t hwq = 0; + +- io_lock = fnic_io_lock_tag(fnic, abt_tag); +- spin_lock_irqsave(io_lock, flags); ++ abt_tag = blk_mq_unique_tag(rq); ++ hwq = blk_mq_unique_tag_to_hwq(abt_tag); ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if (!io_req || io_req->port_id != iter_data->port_id) { +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + return true; + } + +@@@ -1585,8 -1644,8 +1813,13 @@@ + * Found IO that is still pending with firmware and + * belongs to rport that went away + */ +++<<<<<<< HEAD + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + return true; + } + if (io_req->abts_done) { +@@@ -1631,17 -1690,17 +1864,31 @@@ + * 
aborted later by scsi_eh, or cleaned up during + * lun reset + */ +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + + CMD_STATE(sc) = old_ioreq_state; + + spin_unlock_irqrestore(io_lock, flags); + + } else { + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + else + + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) ++ fnic_priv(sc)->state = old_ioreq_state; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ } else { ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; ++ else ++ fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + atomic64_inc(&term_stats->terminates); + iter_data->term_cnt++; + } +@@@ -1745,12 -1807,10 +1995,17 @@@ int fnic_abort_cmd(struct scsi_cmnd *sc + term_stats = &fnic->fnic_stats.term_stats; + + rport = starget_to_rport(scsi_target(sc->device)); +++<<<<<<< HEAD + + FNIC_SCSI_DBG(KERN_DEBUG, + + fnic->lport->host, + + "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", + + rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); +++======= ++ mqtag = blk_mq_unique_tag(rq); ++ hwq = blk_mq_unique_tag_to_hwq(mqtag); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + - fnic_priv(sc)->flags = FNIC_NO_FLAGS; + + CMD_FLAGS(sc) = FNIC_NO_FLAGS; + + if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + ret = FAILED; +@@@ -1767,20 -1829,19 +2024,30 @@@ + * happened, the completion wont actually complete the command + * and it will be considered as an aborted command + * + - * .io_req will not be cleared except while holding io_req_lock. + + * The CMD_SP will not be cleared except while holding io_req_lock. + */ +++<<<<<<< HEAD + + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (!io_req) { +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + goto fnic_abort_cmd_end; + } + + io_req->abts_done = &tm_done; + +++<<<<<<< HEAD + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + goto wait_pending; + } + +@@@ -1808,11 -1869,11 +2075,11 @@@ + * the completion wont be done till mid-layer, since abort + * has already started. 
+ */ + - old_ioreq_state = fnic_priv(sc)->state; + - fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + - fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + + old_ioreq_state = CMD_STATE(sc); + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + /* + * Check readiness of the remote port. If the path to remote +@@@ -1829,15 -1890,15 +2096,24 @@@ + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + +++<<<<<<< HEAD + + if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun, + + io_req)) { + + spin_lock_irqsave(io_lock, flags); + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + + CMD_STATE(sc) = old_ioreq_state; + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ if (fnic_queue_abort_io_req(fnic, mqtag, task_req, fc_lun.scsi_lun, ++ io_req, hwq)) { ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) ++ fnic_priv(sc)->state = old_ioreq_state; ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (io_req) + io_req->abts_done = NULL; +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + ret = FAILED; + goto fnic_abort_cmd_end; + } +@@@ -1861,21 -1922,21 +2137,31 @@@ + fnic->config.ed_tov)); + + /* Check the abort status */ +- spin_lock_irqsave(io_lock, flags); ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); +++<<<<<<< HEAD + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; +++======= ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + ret = FAILED; + goto fnic_abort_cmd_end; + } + io_req->abts_done = NULL; + + /* fw did not complete abort, timed out */ +++<<<<<<< HEAD + + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (task_req == FCPIO_ITMF_ABT_TASK) { + atomic64_inc(&abts_stats->abort_drv_timeouts); + } else { +@@@ -1888,8 -1949,8 +2174,13 @@@ + + /* IO out of order */ + +++<<<<<<< HEAD + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Issuing Host reset due to out of order IO\n"); + +@@@ -1905,11 -1966,13 +2196,19 @@@ + * free the io_req if successful. If abort fails, + * Device reset will clean the I/O. 
+ */ +++<<<<<<< HEAD + + if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS) + + CMD_SP(sc) = NULL; + + else { +++======= ++ if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS || ++ (fnic_priv(sc)->abts_status == FCPIO_ABORTED)) { ++ fnic_priv(sc)->io_req = NULL; ++ io_req->sc = NULL; ++ } else { +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + ret = FAILED; +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + goto fnic_abort_cmd_end; + } + +@@@ -1918,19 -1982,17 +2218,19 @@@ + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + + + if (sc->scsi_done) { + /* Call SCSI completion function to complete the IO */ + - sc->result = DID_ABORT << 16; + - scsi_done(sc); + - atomic64_dec(&fnic_stats->io_stats.active_ios); + - if (atomic64_read(&fnic->io_cmpl_skip)) + - atomic64_dec(&fnic->io_cmpl_skip); + - else + - atomic64_inc(&fnic_stats->io_stats.io_completions); + + sc->result = (DID_ABORT << 16); + + sc->scsi_done(sc); + + atomic64_dec(&fnic_stats->io_stats.active_ios); + + if (atomic64_read(&fnic->io_cmpl_skip)) + + atomic64_dec(&fnic->io_cmpl_skip); + + else + + atomic64_inc(&fnic_stats->io_stats.io_completions); + + } + + fnic_abort_cmd_end: +- FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc, ++ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, mqtag, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | +@@@ -1948,26 -2010,31 +2248,49 @@@ static inline int fnic_queue_dr_io_req( + struct scsi_cmnd *sc, + struct fnic_io_req *io_req) + { +++<<<<<<< HEAD + + struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; + + struct Scsi_Host *host = fnic->lport->host; + + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + + struct scsi_lun fc_lun; + + int ret = 0; + + unsigned long intr_flags; + + + + spin_lock_irqsave(host->host_lock, intr_flags); + + if (unlikely(fnic_chk_state_flags_locked(fnic, + + FNIC_FLAGS_IO_BLOCKED))) { + + spin_unlock_irqrestore(host->host_lock, intr_flags); + + return FAILED; + + } else + + atomic_inc(&fnic->in_flight); + + spin_unlock_irqrestore(host->host_lock, intr_flags); +++======= ++ struct vnic_wq_copy *wq; ++ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; ++ struct scsi_lun fc_lun; ++ int ret = 0; ++ unsigned long flags; ++ uint16_t hwq = 0; ++ uint32_t tag = 0; + +- spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); ++ tag = io_req->tag; ++ hwq = blk_mq_unique_tag_to_hwq(tag); ++ wq = &fnic->hw_copy_wq[hwq]; + +- if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) +- free_wq_copy_descs(fnic, wq); ++ spin_lock_irqsave(&fnic->fnic_lock, flags); ++ if (unlikely(fnic_chk_state_flags_locked(fnic, ++ FNIC_FLAGS_IO_BLOCKED))) { ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); ++ return FAILED; ++ } else ++ atomic_inc(&fnic->in_flight); ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ ++ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) ++ free_wq_copy_descs(fnic, wq, hwq); + + if (!vnic_wq_copy_desc_avail(wq)) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +@@@ -2021,11 -2090,13 +2345,19 @@@ static bool fnic_pending_aborts_iter(st + if (sc == iter_data->lr_sc || sc->device != lun_dev) + return true; + +++<<<<<<< HEAD + + io_lock = fnic_io_lock_tag(fnic, abt_tag); + + 
spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ abt_tag = blk_mq_unique_tag(rq); ++ hwq = blk_mq_unique_tag_to_hwq(abt_tag); ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (!io_req) { +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + return true; + } + +@@@ -2035,14 -2106,14 +2367,19 @@@ + */ + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Found IO in %s on lun\n", + - fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + + fnic_ioreq_state_to_str(CMD_STATE(sc))); + +++<<<<<<< HEAD + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + return true; + } + - if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && + - (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { + + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && + + (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s dev rst not pending sc 0x%p\n", __func__, + sc); +@@@ -2066,66 -2137,67 +2403,110 @@@ + + BUG_ON(io_req->abts_done); + +++<<<<<<< HEAD + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + + abt_tag |= FNIC_TAG_DEV_RST; +++======= ++ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s: dev rst sc 0x%p\n", __func__, sc); + } + + - fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + io_req->abts_done = &tm_done; +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + /* Now queue the abort command to firmware */ + int_to_scsilun(sc->device->lun, &fc_lun); + + if (fnic_queue_abort_io_req(fnic, abt_tag, + FCPIO_ITMF_ABT_TASK_TERM, +++<<<<<<< HEAD + + fc_lun.scsi_lun, io_req)) { + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if (io_req) + + io_req->abts_done = NULL; + + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + + CMD_STATE(sc) = old_ioreq_state; + + spin_unlock_irqrestore(io_lock, flags); + + iter_data->ret = FAILED; + + return false; + + } else { + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ fc_lun.scsi_lun, io_req, hwq)) { ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; ++ if (io_req) ++ io_req->abts_done = NULL; ++ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) ++ fnic_priv(sc)->state = old_ioreq_state; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ iter_data->ret = FAILED; ++ return false; ++ } else { ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + } + - fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; + + 
wait_for_completion_timeout(&tm_done, msecs_to_jiffies + (fnic->config.ed_tov)); + + /* Recheck cmd state to check if it is now aborted */ +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); + + if (!io_req) { + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; ++ if (!io_req) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + return true; + } + + io_req->abts_done = NULL; + + /* if abort is still pending with fw, fail */ +++<<<<<<< HEAD + + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { + + spin_unlock_irqrestore(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; +++======= ++ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + iter_data->ret = FAILED; + return false; + } + - fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + + /* original sc used for lr is handled by dev reset code */ +++<<<<<<< HEAD + + if (sc != iter_data->lr_sc) + + CMD_SP(sc) = NULL; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ if (sc != iter_data->lr_sc) { ++ fnic_priv(sc)->io_req = NULL; ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL; ++ } ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + /* original sc used for lr is handled by dev reset code */ + if (sc != iter_data->lr_sc) { +@@@ -2204,10 -2273,10 +2584,11 @@@ int fnic_device_reset(struct scsi_cmnd + struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct reset_stats *reset_stats; +- int tag = rq->tag; ++ int mqtag = rq->tag; + DECLARE_COMPLETION_ONSTACK(tm_done); + + int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ + bool new_sc = 0; ++ uint16_t hwq = 0; + + /* Wait for rport to unblock */ + fc_block_scsi_eh(sc); +@@@ -2235,23 -2304,26 +2616,36 @@@ + goto fnic_device_reset_end; + } + + - fnic_priv(sc)->flags = FNIC_DEVICE_RESET; + + CMD_FLAGS(sc) = FNIC_DEVICE_RESET; + + /* Allocate tag if not present */ + +- if (unlikely(tag < 0)) { ++ if (unlikely(mqtag < 0)) { + /* + - * For device reset issued through sg3utils, we let + - * only one LUN_RESET to go through and use a special + - * tag equal to max_tag_id so that we don't have to allocate + - * or free it. It won't interact with tags + - * allocated by mid layer. + + * Really should fix the midlayer to pass in a proper + + * request for ioctls... 
+ */ +++<<<<<<< HEAD + + tag = fnic_scsi_host_start_tag(fnic, sc); + + if (unlikely(tag == SCSI_NO_TAG)) + + goto fnic_device_reset_end; + + tag_gen_flag = 1; + + new_sc = 1; + + } + + io_lock = fnic_io_lock_hash(fnic, sc); + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ mutex_lock(&fnic->sgreset_mutex); ++ mqtag = fnic->fnic_max_tag_id; ++ new_sc = 1; ++ } else { ++ mqtag = blk_mq_unique_tag(rq); ++ hwq = blk_mq_unique_tag_to_hwq(mqtag); ++ } ++ ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + /* + * If there is a io_req attached to this command, then use it, +@@@ -2265,29 -2337,38 +2659,58 @@@ + } + memset(io_req, 0, sizeof(*io_req)); + io_req->port_id = rport->port_id; +++<<<<<<< HEAD + + CMD_SP(sc) = (char *)io_req; + + } + + io_req->dr_done = &tm_done; + + CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; + + CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ io_req->tag = mqtag; ++ fnic_priv(sc)->io_req = io_req; ++ io_req->sc = sc; ++ ++ if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) ++ WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n", ++ fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag)); ++ ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = ++ io_req; ++ } ++ io_req->dr_done = &tm_done; ++ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; ++ fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + +- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", mqtag); + + /* + * issue the device reset, if enqueue failed, clean up the ioreq + * and break assoc with scsi cmd + */ + if (fnic_queue_dr_io_req(fnic, sc, io_req)) { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (io_req) + io_req->dr_done = NULL; + goto fnic_device_reset_clean; + } +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + + /* + * Wait on the local completion for LUN reset. 
The io_req may be +@@@ -2296,12 -2377,12 +2719,17 @@@ + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (!io_req) { +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +- "io_req is null tag 0x%x sc 0x%p\n", tag, sc); ++ "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc); + goto fnic_device_reset_end; + } + io_req->dr_done = NULL; +@@@ -2316,42 -2397,42 +2744,65 @@@ + atomic64_inc(&reset_stats->device_reset_timeouts); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset timed out\n"); +++<<<<<<< HEAD + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + int_to_scsilun(sc->device->lun, &fc_lun); + /* + * Issue abort and terminate on device reset request. + * If q'ing of terminate fails, retry it after a delay. + */ + while (1) { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + break; + } +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + if (fnic_queue_abort_io_req(fnic, +- tag | FNIC_TAG_DEV_RST, ++ mqtag | FNIC_TAG_DEV_RST, + FCPIO_ITMF_ABT_TASK_TERM, +- fc_lun.scsi_lun, io_req)) { ++ fc_lun.scsi_lun, io_req, hwq)) { + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); + } else { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; ++ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + io_req->abts_done = &tm_done; +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +- "Abort and terminate issued on Device reset " +- "tag 0x%x sc 0x%p\n", tag, sc); ++ "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n", ++ mqtag, sc); + break; + } + } + while (1) { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + wait_for_completion_timeout(&tm_done, + msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); + break; +@@@ -2383,8 -2464,8 
+2834,13 @@@ + * succeeds + */ + if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Device reset failed" + " since could not abort all IOs\n"); +@@@ -2392,17 -2473,20 +2848,30 @@@ + } + + /* Clean lun reset command */ +++<<<<<<< HEAD + + spin_lock_irqsave(io_lock, flags); + + io_req = (struct fnic_io_req *)CMD_SP(sc); +++======= ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); ++ io_req = fnic_priv(sc)->io_req; +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (io_req) + /* Completed, and successful */ + ret = SUCCESS; + + fnic_device_reset_clean: +++<<<<<<< HEAD + + if (io_req) + + CMD_SP(sc) = NULL; +++======= ++ if (io_req) { ++ fnic_priv(sc)->io_req = NULL; ++ io_req->sc = NULL; ++ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL; ++ } +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + if (io_req) { + start_time = io_req->start_time; +@@@ -2657,12 -2747,11 +3131,11 @@@ static bool fnic_abts_pending_iter(stru + if (iter_data->lun_dev && sc->device != iter_data->lun_dev) + return true; + +- io_lock = fnic_io_lock_hash(fnic, sc); +- spin_lock_irqsave(io_lock, flags); ++ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + - io_req = fnic_priv(sc)->io_req; + + io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!io_req) { +- spin_unlock_irqrestore(io_lock, flags); ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + return true; + } + +@@@ -2672,9 -2761,9 +3145,15 @@@ + */ + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "Found IO in %s on lun\n", +++<<<<<<< HEAD + + fnic_ioreq_state_to_str(CMD_STATE(sc))); + + cmd_state = CMD_STATE(sc); + + spin_unlock_irqrestore(io_lock, flags); +++======= ++ fnic_ioreq_state_to_str(fnic_priv(sc)->state)); ++ cmd_state = fnic_priv(sc)->state; ++ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); +++>>>>>>> c81df08cd294 (scsi: fnic: Add support for multiqueue (MQ) in fnic driver) + if (cmd_state == FNIC_IOREQ_ABTS_PENDING) + iter_data->ret = 1; + +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h +index 9ebca3720f82..87ac314ffce1 100644 +--- a/drivers/scsi/fnic/fnic.h ++++ b/drivers/scsi/fnic/fnic.h +@@ -48,7 +48,6 @@ + #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ + #define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */ + #define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ +-#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ + #define FNIC_DFLT_QUEUE_DEPTH 256 + #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ + +@@ -292,7 +291,6 @@ struct fnic { + struct fnic_host_tag *tags; + mempool_t *io_req_pool; + mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES]; +- spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */ + + unsigned int copy_wq_base; + struct work_struct link_work; +* Unmerged path drivers/scsi/fnic/fnic_main.c +* Unmerged path drivers/scsi/fnic/fnic_scsi.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/cffd0441.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/cffd0441.failed new file 
mode 100644 index 0000000000000..3822b3801e766 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/cffd0441.failed @@ -0,0 +1,328 @@ +use uniform permission checks for all mount propagation changes + +jira LE-4311 +cve CVE-2025-38498 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Al Viro +commit cffd0441872e7f6b1fce5e78fb1c99187a291330 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/cffd0441.failed + +do_change_type() and do_set_group() are operating on different +aspects of the same thing - propagation graph. The latter +asks for mounts involved to be mounted in namespace(s) the caller +has CAP_SYS_ADMIN for. The former is a mess - originally it +didn't even check that mount *is* mounted. That got fixed, +but the resulting check turns out to be too strict for userland - +in effect, we check that mount is in our namespace, having already +checked that we have CAP_SYS_ADMIN there. + +What we really need (in both cases) is + * only touch mounts that are mounted. That's a must-have +constraint - data corruption happens if it gets violated. + * don't allow messing with a namespace unless you already +have enough permissions to do so (i.e. CAP_SYS_ADMIN in its userns). + +That's equivalent to what do_set_group() does; let's extract that +into a helper (may_change_propagation()) and use it in both +do_set_group() and do_change_type(). + +Fixes: 12f147ddd6de "do_change_type(): refuse to operate on unmounted/not ours mounts" + Acked-by: Andrei Vagin + Reviewed-by: Pavel Tikhomirov + Tested-by: Pavel Tikhomirov + Reviewed-by: Christian Brauner + Signed-off-by: Al Viro +(cherry picked from commit cffd0441872e7f6b1fce5e78fb1c99187a291330) + Signed-off-by: Jonathan Maple + +# Conflicts: +# fs/namespace.c +diff --cc fs/namespace.c +index 4ec9c03ab924,88db58061919..000000000000 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@@ -2278,9 -2856,22 +2278,22 @@@ static int graft_tree(struct mount *mnt + d_is_dir(mnt->mnt.mnt_root)) + return -ENOTDIR; + + - return attach_recursive_mnt(mnt, p, mp); + + return attach_recursive_mnt(mnt, p, mp, false); + } + ++ static int may_change_propagation(const struct mount *m) ++ { ++ struct mnt_namespace *ns = m->mnt_ns; ++ ++ // it must be mounted in some namespace ++ if (IS_ERR_OR_NULL(ns)) // is_mounted() ++ return -EINVAL; ++ // and the caller must be admin in userns of that namespace ++ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) ++ return -EPERM; ++ return 0; ++ } ++ + /* + * Sanity check the flags to change_mnt_propagation.
+ */ +@@@ -2671,29 -3347,238 +2684,121 @@@ static inline int tree_contains_unbinda + return 0; + } + +++<<<<<<< HEAD +++======= ++ static int do_set_group(struct path *from_path, struct path *to_path) ++ { ++ struct mount *from, *to; ++ int err; ++ ++ from = real_mount(from_path->mnt); ++ to = real_mount(to_path->mnt); ++ ++ namespace_lock(); ++ ++ err = may_change_propagation(from); ++ if (err) ++ goto out; ++ err = may_change_propagation(to); ++ if (err) ++ goto out; ++ ++ err = -EINVAL; ++ /* To and From paths should be mount roots */ ++ if (!path_mounted(from_path)) ++ goto out; ++ if (!path_mounted(to_path)) ++ goto out; ++ ++ /* Setting sharing groups is only allowed across same superblock */ ++ if (from->mnt.mnt_sb != to->mnt.mnt_sb) ++ goto out; ++ ++ /* From mount root should be wider than To mount root */ ++ if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) ++ goto out; ++ ++ /* From mount should not have locked children in place of To's root */ ++ if (__has_locked_children(from, to->mnt.mnt_root)) ++ goto out; ++ ++ /* Setting sharing groups is only allowed on private mounts */ ++ if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) ++ goto out; ++ ++ /* From should not be private */ ++ if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) ++ goto out; ++ ++ if (IS_MNT_SLAVE(from)) { ++ hlist_add_behind(&to->mnt_slave, &from->mnt_slave); ++ to->mnt_master = from->mnt_master; ++ } ++ ++ if (IS_MNT_SHARED(from)) { ++ to->mnt_group_id = from->mnt_group_id; ++ list_add(&to->mnt_share, &from->mnt_share); ++ set_mnt_shared(to); ++ } ++ ++ err = 0; ++ out: ++ namespace_unlock(); ++ return err; ++ } ++ ++ /** ++ * path_overmounted - check if path is overmounted ++ * @path: path to check ++ * ++ * Check if path is overmounted, i.e., if there's a mount on top of ++ * @path->mnt with @path->dentry as mountpoint. ++ * ++ * Context: namespace_sem must be held at least shared. ++ * MUST NOT be called under lock_mount_hash() (there one should just ++ * call __lookup_mnt() and check if it returns NULL). ++ * Return: If path is overmounted true is returned, false if not. ++ */ ++ static inline bool path_overmounted(const struct path *path) ++ { ++ unsigned seq = read_seqbegin(&mount_lock); ++ bool no_child; ++ ++ rcu_read_lock(); ++ no_child = !__lookup_mnt(path->mnt, path->dentry); ++ rcu_read_unlock(); ++ if (need_seqretry(&mount_lock, seq)) { ++ read_seqlock_excl(&mount_lock); ++ no_child = !__lookup_mnt(path->mnt, path->dentry); ++ read_sequnlock_excl(&mount_lock); ++ } ++ return unlikely(!no_child); ++ } ++ +++>>>>>>> cffd0441872e (use uniform permission checks for all mount propagation changes) + /* + - * Check if there is a possibly empty chain of descent from p1 to p2. + - * Locks: namespace_sem (shared) or mount_lock (read_seqlock_excl). + - */ + -static bool mount_is_ancestor(const struct mount *p1, const struct mount *p2) + -{ + - while (p2 != p1 && mnt_has_parent(p2)) + - p2 = p2->mnt_parent; + - return p2 == p1; + -} + - + -/** + - * can_move_mount_beneath - check that we can mount beneath the top mount + - * @from: mount to mount beneath + - * @to: mount under which to mount + - * @mp: mountpoint of @to + - * + - * - Make sure that @to->dentry is actually the root of a mount under + - * which we can mount another mount. + - * - Make sure that nothing can be mounted beneath the caller's current + - * root or the rootfs of the namespace. + - * - Make sure that the caller can unmount the topmost mount ensuring + - * that the caller could reveal the underlying mountpoint. 
+ - * - Ensure that nothing has been mounted on top of @from before we + - * grabbed @namespace_sem to avoid creating pointless shadow mounts. + - * - Prevent mounting beneath a mount if the propagation relationship + - * between the source mount, parent mount, and top mount would lead to + - * nonsensical mount trees. + - * + - * Context: This function expects namespace_lock() to be held. + - * Return: On success 0, and on error a negative error code is returned. + - */ + -static int can_move_mount_beneath(const struct path *from, + - const struct path *to, + - const struct mountpoint *mp) + -{ + - struct mount *mnt_from = real_mount(from->mnt), + - *mnt_to = real_mount(to->mnt), + - *parent_mnt_to = mnt_to->mnt_parent; + - + - if (!mnt_has_parent(mnt_to)) + - return -EINVAL; + - + - if (!path_mounted(to)) + - return -EINVAL; + - + - if (IS_MNT_LOCKED(mnt_to)) + - return -EINVAL; + - + - /* Avoid creating shadow mounts during mount propagation. */ + - if (path_overmounted(from)) + - return -EINVAL; + - + - /* + - * Mounting beneath the rootfs only makes sense when the + - * semantics of pivot_root(".", ".") are used. + - */ + - if (&mnt_to->mnt == current->fs->root.mnt) + - return -EINVAL; + - if (parent_mnt_to == current->nsproxy->mnt_ns->root) + - return -EINVAL; + - + - if (mount_is_ancestor(mnt_to, mnt_from)) + - return -EINVAL; + - + - /* + - * If the parent mount propagates to the child mount this would + - * mean mounting @mnt_from on @mnt_to->mnt_parent and then + - * propagating a copy @c of @mnt_from on top of @mnt_to. This + - * defeats the whole purpose of mounting beneath another mount. + - */ + - if (propagation_would_overmount(parent_mnt_to, mnt_to, mp)) + - return -EINVAL; + - + - /* + - * If @mnt_to->mnt_parent propagates to @mnt_from this would + - * mean propagating a copy @c of @mnt_from on top of @mnt_from. + - * Afterwards @mnt_from would be mounted on top of + - * @mnt_to->mnt_parent and @mnt_to would be unmounted from + - * @mnt->mnt_parent and remounted on @mnt_from. But since @c is + - * already mounted on @mnt_from, @mnt_to would ultimately be + - * remounted on top of @c. Afterwards, @mnt_from would be + - * covered by a copy @c of @mnt_from and @c would be covered by + - * @mnt_from itself. This defeats the whole purpose of mounting + - * @mnt_from beneath @mnt_to. + - */ + - if (check_mnt(mnt_from) && + - propagation_would_overmount(parent_mnt_to, mnt_from, mp)) + - return -EINVAL; + - + - return 0; + -} + - + -/* may_use_mount() - check if a mount tree can be used + - * @mnt: vfsmount to be used + - * + - * This helper checks if the caller may use the mount tree starting + - * from @path->mnt. The caller may use the mount tree under the + - * following circumstances: + - * + - * (1) The caller is located in the mount namespace of the mount tree. + - * This also implies that the mount does not belong to an anonymous + - * mount namespace. + - * (2) The caller is trying to use a mount tree that belongs to an + - * anonymous mount namespace. + - * + - * For that to be safe, this helper enforces that the origin mount + - * namespace the anonymous mount namespace was created from is the + - * same as the caller's mount namespace by comparing the sequence + - * numbers. + - * + - * The ownership of a non-anonymous mount namespace such as the + - * caller's cannot change. + - * => We know that the caller's mount namespace is stable. 
+ - * + - * If the origin sequence number of the anonymous mount namespace is + - * the same as the sequence number of the caller's mount namespace. + - * => The owning namespaces are the same. + - * + - * ==> The earlier capability check on the owning namespace of the + - * caller's mount namespace ensures that the caller has the + - * ability to use the mount tree. + - * + - * Returns true if the mount tree can be used, false otherwise. + + * Check that there aren't references to earlier/same mount namespaces in the + + * specified subtree. Such references can act as pins for mount namespaces + + * that aren't checked by the mount-cycle checking code, thereby allowing + + * cycles to be made. + */ + -static inline bool may_use_mount(struct mount *mnt) + +static bool check_for_nsfs_mounts(struct mount *subtree) + { + - if (check_mnt(mnt)) + - return true; + + struct mount *p; + + bool ret = false; + + - /* + - * Make sure that noone unmounted the target path or somehow + - * managed to get their hands on something purely kernel + - * internal. + - */ + - if (!is_mounted(&mnt->mnt)) + - return false; + + lock_mount_hash(); + + for (p = subtree; p; p = next_mnt(p, subtree)) + + if (mnt_ns_loop(p->mnt.mnt_root)) + + goto out; + + - return check_anonymous_mnt(mnt); + + ret = true; + +out: + + unlock_mount_hash(); + + return ret; + } + + -static int do_move_mount(struct path *old_path, + - struct path *new_path, enum mnt_tree_flags_t flags) + +static int do_move_mount(struct path *old_path, struct path *new_path) + { + struct mnt_namespace *ns; + struct mount *p; +* Unmerged path fs/namespace.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e2813fc2.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e2813fc2.failed new file mode 100644 index 0000000000000..72f1732398f0a --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e2813fc2.failed @@ -0,0 +1,293 @@ +scsi: fnic: Replace shost_printk() with dev_info()/dev_err() + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit e2813fc27d274747fa6e204e135e3c89cc6426a3 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e2813fc2.failed + +Sending host information to shost_printk() prior to host initialization in +fnic is unnecessary. Replace shost_printk() and a printk() prior to this +initialization with dev_info() and dev_err() accordingly. + + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Hannes Reinecke + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-2-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit e2813fc27d274747fa6e204e135e3c89cc6426a3) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fnic_main.c +diff --cc drivers/scsi/fnic/fnic_main.c +index 06fd7b543b33,471a156b074e..000000000000 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@@ -401,11 -387,10 +395,10 @@@ static int fnic_notify_set(struct fnic + err = vnic_dev_notify_set(fnic->vdev, -1); + break; + case VNIC_DEV_INTR_MODE_MSIX: + - err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base); + + err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY); + break; + default: +- shost_printk(KERN_ERR, fnic->lport->host, +- "Interrupt mode should be set up" ++ dev_err(&fnic->pdev->dev, "Interrupt mode should be set up" + " before devcmd notify set %d\n", + vnic_dev_get_intr_mode(fnic->vdev)); + err = -1; +@@@ -574,18 -559,11 +567,16 @@@ static int fnic_scsi_drv_init(struct fn + host->max_cmd_len = FCOE_MAX_CMD_LEN; + + host->nr_hw_queues = fnic->wq_copy_count; + + if (host->nr_hw_queues > 1) + + shost_printk(KERN_ERR, host, + + "fnic: blk-mq is not supported"); + + + + host->nr_hw_queues = fnic->wq_copy_count = 1; + +- shost_printk(KERN_INFO, host, +- "fnic: can_queue: %d max_lun: %llu", ++ dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", + host->can_queue, host->max_lun); + +- shost_printk(KERN_INFO, host, +- "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", ++ dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + host->max_id, host->max_cmd_len, host->nr_hw_queues); + + return 0; +@@@ -634,15 -639,9 +625,14 @@@ static int fnic_probe(struct pci_dev *p + fnic->fnic_num = fnic_id; + fnic_stats_debugfs_init(fnic); + + + /* Setup PCI resources */ + + pci_set_drvdata(pdev, fnic); + + + + fnic->pdev = pdev; + + + err = pci_enable_device(pdev); + if (err) { +- shost_printk(KERN_ERR, fnic->lport->host, +- "Cannot enable PCI device, aborting.\n"); ++ dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n"); + goto err_out_free_hba; + } + +@@@ -760,6 -748,16 +738,19 @@@ + goto err_out_clear_intr; + } + +++<<<<<<< HEAD +++======= ++ fnic_scsi_drv_init(fnic); ++ ++ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { ++ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; ++ fnic->sw_copy_wq[hwq].io_req_table = ++ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * ++ sizeof(struct fnic_io_req *), GFP_KERNEL); ++ } ++ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", ++ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); +++>>>>>>> e2813fc27d27 (scsi: fnic: Replace shost_printk() with dev_info()/dev_err()) + + /* initialize all fnic locks */ + spin_lock_init(&fnic->fnic_lock); +@@@ -844,27 -836,41 +832,50 @@@ + + /* allocate RQ buffers and post them to RQ*/ + for (i = 0; i < fnic->rq_count; i++) { + + vnic_rq_enable(&fnic->rq[i]); + err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); + if (err) { +- shost_printk(KERN_ERR, fnic->lport->host, +- "fnic_alloc_rq_frame can't alloc " ++ dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc " + "frame\n"); + - goto err_out_rq_buf; + + goto err_out_free_rq_buf; + } + } + +++<<<<<<< HEAD +++======= ++ /* Enable all queues */ ++ for (i = 0; i < fnic->raw_wq_count; i++) ++ vnic_wq_enable(&fnic->wq[i]); ++ for (i = 0; i < fnic->rq_count; i++) { ++ if (!ioread32(&fnic->rq[i].ctrl->enable)) ++ vnic_rq_enable(&fnic->rq[i]); ++ } ++ for (i = 0; i < fnic->wq_copy_count; i++) ++ 
vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); ++ ++ err = fnic_request_intr(fnic); ++ if (err) { ++ dev_err(&fnic->pdev->dev, "Unable to request irq.\n"); ++ goto err_out_request_intr; ++ } ++ +++>>>>>>> e2813fc27d27 (scsi: fnic: Replace shost_printk() with dev_info()/dev_err()) + /* + * Initialization done with PCI system, hardware, firmware. + * Add host to SCSI + */ + err = scsi_add_host(lp->host, &pdev->dev); + if (err) { +++<<<<<<< HEAD + + shost_printk(KERN_ERR, fnic->lport->host, + + "fnic: scsi_add_host failed...exiting\n"); + + goto err_out_free_rq_buf; +++======= ++ dev_err(&fnic->pdev->dev, "fnic: scsi_add_host failed...exiting\n"); ++ goto err_out_scsi_add_host; +++>>>>>>> e2813fc27d27 (scsi: fnic: Replace shost_printk() with dev_info()/dev_err()) + } + + - + /* Start local port initiatialization */ + + lp->link_up = 0; +* Unmerged path drivers/scsi/fnic/fnic_main.c +diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c +index f7c2ee009426..308323b89d49 100644 +--- a/drivers/scsi/fnic/fnic_res.c ++++ b/drivers/scsi/fnic/fnic_res.c +@@ -42,9 +42,7 @@ int fnic_get_vnic_config(struct fnic *fnic) + offsetof(struct vnic_fc_config, m), \ + sizeof(c->m), &c->m); \ + if (err) { \ +- shost_printk(KERN_ERR, fnic->lport->host, \ +- "Error getting %s, %d\n", #m, \ +- err); \ ++ dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \ + return err; \ + } \ + } while (0); +@@ -151,40 +149,29 @@ int fnic_get_vnic_config(struct fnic *fnic) + + c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count); + +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC MAC addr %pM " ++ dev_info(&fnic->pdev->dev, "vNIC MAC addr %pM " + "wq/wq_copy/rq %d/%d/%d\n", + fnic->ctlr.ctl_src_addr, + c->wq_enet_desc_count, c->wq_copy_desc_count, + c->rq_desc_count); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC node wwn %llx port wwn %llx\n", ++ dev_info(&fnic->pdev->dev, "vNIC node wwn %llx port wwn %llx\n", + c->node_wwn, c->port_wwn); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC ed_tov %d ra_tov %d\n", ++ dev_info(&fnic->pdev->dev, "vNIC ed_tov %d ra_tov %d\n", + c->ed_tov, c->ra_tov); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC mtu %d intr timer %d\n", ++ dev_info(&fnic->pdev->dev, "vNIC mtu %d intr timer %d\n", + c->maxdatafieldsize, c->intr_timer); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC flags 0x%x luns per tgt %d\n", ++ dev_info(&fnic->pdev->dev, "vNIC flags 0x%x luns per tgt %d\n", + c->flags, c->luns_per_tgt); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC flogi_retries %d flogi timeout %d\n", ++ dev_info(&fnic->pdev->dev, "vNIC flogi_retries %d flogi timeout %d\n", + c->flogi_retries, c->flogi_timeout); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC plogi retries %d plogi timeout %d\n", ++ dev_info(&fnic->pdev->dev, "vNIC plogi retries %d plogi timeout %d\n", + c->plogi_retries, c->plogi_timeout); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC io throttle count %d link dn timeout %d\n", ++ dev_info(&fnic->pdev->dev, "vNIC io throttle count %d link dn timeout %d\n", + c->io_throttle_count, c->link_down_timeout); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC port dn io retries %d port dn timeout %d\n", ++ dev_info(&fnic->pdev->dev, "vNIC port dn io retries %d port dn timeout %d\n", + c->port_down_io_retries, c->port_down_timeout); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC wq_copy_count: %d\n", c->wq_copy_count); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC intr mode: %d\n", 
c->intr_mode); ++ dev_info(&fnic->pdev->dev, "vNIC wq_copy_count: %d\n", c->wq_copy_count); ++ dev_info(&fnic->pdev->dev, "vNIC intr mode: %d\n", c->intr_mode); + + return 0; + } +@@ -218,18 +205,12 @@ void fnic_get_res_counts(struct fnic *fnic) + fnic->intr_count = vnic_dev_get_res_count(fnic->vdev, + RES_TYPE_INTR_CTRL); + +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC fw resources wq_count: %d\n", fnic->wq_count); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC fw resources rq_count: %d\n", fnic->rq_count); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC fw resources cq_count: %d\n", fnic->cq_count); +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC fw resources intr_count: %d\n", fnic->intr_count); ++ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count); ++ dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count); ++ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count); ++ dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count); ++ dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count); ++ dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count); + } + + void fnic_free_vnic_resources(struct fnic *fnic) +@@ -265,19 +246,17 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) + + intr_mode = vnic_dev_get_intr_mode(fnic->vdev); + +- shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n", ++ dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n", + intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : + intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : + intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
+ "MSI-X" : "unknown"); + +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d", ++ dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d", + fnic->wq_count, fnic->wq_copy_count, + fnic->raw_wq_count, fnic->rq_count); + +- shost_printk(KERN_INFO, fnic->lport->host, +- "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n", ++ dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n", + fnic->cq_count, fnic->intr_count, + fnic->config.wq_copy_desc_count); + +@@ -352,8 +331,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) + RES_TYPE_INTR_PBA_LEGACY, 0); + + if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { +- shost_printk(KERN_ERR, fnic->lport->host, +- "Failed to hook legacy pba resource\n"); ++ dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n"); + err = -ENODEV; + goto err_out_cleanup; + } +@@ -456,8 +434,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) + /* init the stats memory by making the first call here */ + err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); + if (err) { +- shost_printk(KERN_ERR, fnic->lport->host, +- "vnic_dev_stats_dump failed - x%x\n", err); ++ dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err); + goto err_out_cleanup; + } + diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e984fa25.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e984fa25.failed new file mode 100644 index 0000000000000..60a5ebbc88706 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e984fa25.failed @@ -0,0 +1,29 @@ +scsi: fnic: Replace use of sizeof with standard usage + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit e984fa2542e1308d67140bd7a76f678dabbcd9a8 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/e984fa25.failed + +Remove cast and replace use of sizeof(struct) with standard usage of +sizeof. + + Suggested-by: Dan Carpenter +Fixes: a63e78eb2b0f ("scsi: fnic: Add support for fabric based solicited requests and responses") + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250225215056.4899-1-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit e984fa2542e1308d67140bd7a76f678dabbcd9a8) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f421692b.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f421692b.failed new file mode 100644 index 0000000000000..034cfea96f492 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f421692b.failed @@ -0,0 +1,31 @@ +scsi: fnic: Remove unnecessary spinlock locking and unlocking + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit f421692be10133a66a3a8a7d5c76fe9713ea5a8b +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. 
Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f421692b.failed + +Remove unnecessary locking and unlocking of spinlock in +fdls_schedule_oxid_free_retry_work(). This will shorten the time in the +critical section. + + Suggested-by: Dan Carpenter +Fixes: a63e78eb2b0f ("scsi: fnic: Add support for fabric based solicited requests and responses") + Reviewed-by: Sesidhar Baddela + Reviewed-by: Arulprabhu Ponnusamy + Reviewed-by: Gian Carlo Boffa + Reviewed-by: Arun Easi + Tested-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20250301013712.3115-2-kartilak@cisco.com + Signed-off-by: Martin K. Petersen +(cherry picked from commit f421692be10133a66a3a8a7d5c76fe9713ea5a8b) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f828af44.failed b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f828af44.failed new file mode 100644 index 0000000000000..c6904a95494de --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f828af44.failed @@ -0,0 +1,34 @@ +scsi: fnic: Add support for unsolicited requests and responses + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.46.1.el9_6 +commit-author Karan Tilak Kumar +commit f828af44b8ddef3500fda70ef1f6daffe97db36b +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/f828af44.failed + +Add support for unsolicited requests and responses. + +Add support to accept and reject frames. + + Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202409291705.MugERX98-lkp@intel.com/ + Reviewed-by: Sesidhar Baddela +Co-developed-by: Gian Carlo Boffa + Signed-off-by: Gian Carlo Boffa +Co-developed-by: Arulprabhu Ponnusamy + Signed-off-by: Arulprabhu Ponnusamy +Co-developed-by: Arun Easi + Signed-off-by: Arun Easi +Co-developed-by: Karan Tilak Kumar + Signed-off-by: Karan Tilak Kumar +Link: https://lore.kernel.org/r/20241212020312.4786-6-kartilak@cisco.com + Signed-off-by: Martin K. 
Petersen +(cherry picked from commit f828af44b8ddef3500fda70ef1f6daffe97db36b) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c +* Unmerged path drivers/scsi/fnic/fdls_disc.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/rebuild.details.txt b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/rebuild.details.txt new file mode 100644 index 0000000000000..29d2ba36269b0 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.46.1.el9_6/rebuild.details.txt @@ -0,0 +1,67 @@ +Rebuild_History BUILDABLE +Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50% +Number of commits in upstream range v5.14~1..kernel-mainline: 324124 +Number of commits in rpm: 130 +Number of commits matched with upstream: 128 (98.46%) +Number of commits in upstream but not in rpm: 323998 +Number of commits NOT found in upstream: 2 (1.54%) + +Rebuilding Kernel on Branch rocky9_6_rebuild_kernel-5.14.0-570.46.1.el9_6 for kernel-5.14.0-570.46.1.el9_6 +Clean Cherry Picks: 75 (58.59%) +Empty Cherry Picks: 49 (38.28%) +_______________________________ + +__EMPTY COMMITS__________________________ +9a71892cbcdb9d1459c84f5a4c722b14354158a5 Revert "driver core: Fix uevent_show() vs driver detach race" +a7510fbd879e98baf2848b4646adbbd2b9d0fbb3 scsi: fnic: Call scsi_done() directly +924cb24df4fc4d08d32fcb42fa967fdc3f2137cb scsi: fnic: Stop using the SCSI pointer +15924b0503630016dee4dbb945a8df4df659070b scsi: fnic: Replace sgreset tag with max_tag_id +514f0c400bde6b62405467daaf2a0a86bcf7794b scsi: fnic: Fix sg_reset success path +3df9dd0d51c2e4b0c4a400f8ce94308a2d93ef61 scsi: fnic: Add and improve log messages +848d010ab934f1b4326a516396873ddae41db056 scsi: fnic: Remove usage of host_lock +52f6e196e52ef834f928aac297d895f4c32276ea scsi: fnic: Add support for multiqueue (MQ) in fnic_main.c +c81df08cd2944f89921033e5f1744ae2960f4e69 scsi: fnic: Add support for multiqueue (MQ) in fnic driver +55cf715244a7dfda42191445d97628e837158091 scsi: fnic: Improve logs and add support for multiqueue (MQ) +38945c2b006b23a1a7a0c88d76e3294c6199891c scsi: fnic: unlock on error path in fnic_queuecommand() +e2813fc27d274747fa6e204e135e3c89cc6426a3 scsi: fnic: Replace shost_printk() with dev_info()/dev_err() +a63e78eb2b0f654b138abfc323f6bd7573e26145 scsi: fnic: Add support for fabric based solicited requests and responses +b5a57f153bdf772ed41ef286826cef7a1c52f433 scsi: fnic: Add support for target based solicited requests and responses +f828af44b8ddef3500fda70ef1f6daffe97db36b scsi: fnic: Add support for unsolicited requests and responses +2c77081969ee00ec31abda0cf6a26bc269f12ab2 scsi: fnic: Add Cisco hardware model names +09c1e6ab4ab2a107d96f119950dc330e446dc2b0 scsi: fnic: Add and integrate support for FDMI +098585aa8acab3fcd46ce908af84ef168f5ccab6 scsi: fnic: Add and integrate support for FIP +9cf9fe2f3ec5dad8b459267a9e977c0b7811b3f8 scsi: fnic: Add functionality in fnic to support FDLS +6335be1c5009f888367db095a0442cdb256980f8 scsi: fnic: Modify IO path to use FDLS +9243626c211e4d6f5add84c5a7b141e94a2e7222 scsi: fnic: Modify fnic interfaces to use FDLS +a8650a5eaaf123572a7b2d6b1fe9f6b000b6b6a6 scsi: fnic: Add stats and related functionality +7e6886b705fd8b338dbd4b7492bd45f0259cc55f scsi: fnic: Code cleanup +8d26bfcf1d2e829d37ef7f2b506b95e46f25f993 scsi: fnic: Add support to handle port channel RSCN +53021c192cc55074eee744cb41dcdfb9318d1f80 scsi: fnic: Increment driver version +5b6179d4b661e3c22ffa5f3fe2523bad4cd01983 scsi: fnic: Remove unnecessary else and 
unnecessary break in FDLS
+6cfba11510d6f4d0e863fc0fa939c7a983cf13bd scsi: fnic: Remove extern definition from .c files
+bab8551e33f7f5e8743ccb49be41fe3228178e8a scsi: fnic: Remove unnecessary else to fix warning in FDLS FIP
+17789f8a5b81356fc83cf20de899fc351679574e scsi: fnic: Delete incorrect debugfs error handling
+8ccc5947f5d1608f7217cdbee532c7fc2431f7c9 scsi: fnic: Fix use of uninitialized value in debug message
+0620efe789a73586b5b3ed38b27d1b69b2150958 scsi: fnic: Remove always-true IS_FNIC_FCP_INITIATOR macro
+7dbe3aa2f3f83949174b64860dadfaeec3454cff scsi: fnic: Return appropriate error code for mem alloc failure
+3986001ca11ec630d631467d788aac513c61cb52 scsi: fnic: Return appropriate error code from failure of scsi drv init
+54428671aac88dd11074c47cb7e7726e41d40f4a scsi: fnic: Test for memory allocation failure and return error code
+8697934682f1873b7b1cb9cc61b81edf042c9272 scsi: fnic: Propagate SCSI error code from fnic_scsi_drv_init()
+9b2d1ecf8797a82371c9f9209722949fb35b4d15 scsi: fnic: Remove unnecessary debug print
+9ae7563e270372f401a06486a92cdf151d1b27ee scsi: fnic: Fix indentation and remove unnecessary parenthesis
+e984fa2542e1308d67140bd7a76f678dabbcd9a8 scsi: fnic: Replace use of sizeof with standard usage
+7f5dce6e7f0150ee57b8d1186011f57fa62c2843 scsi: fnic: Replace fnic->lock_flags with local flags
+f421692be10133a66a3a8a7d5c76fe9713ea5a8b scsi: fnic: Remove unnecessary spinlock locking and unlocking
+160d6ec69f401037a9a00b9b6569082e4d0649b0 scsi: fnic: Remove redundant flush_workqueue() calls
+bd067766ee2aeb35589ad74d599b0e6311f68c73 scsi: fnic: Remove unnecessary NUL-terminations
+a35b29bdedb4d2ae3160d4d6684a6f1ecd9ca7c2 scsi: fnic: Fix crash in fnic_wq_cmpl_handler when FDMI times out
+74f46a0524f8d2f01dc7ca95bb5fc463a8603e72 scsi: fnic: Turn off FDMI ACTIVE flags on link down
+9b9b8594654a79e3d4166356fd86cd5397477b24 scsi: fnic: Add and improve logs in FDMI and FDMI ABTS paths
+18b5cb6f1fdda4454f55a31f7c78d94da62be495 scsi: fnic: Set appropriate logging level for log message
+85d6fbc47c3087c5d048e6734926b0c36af34fe9 scsi: fnic: Fix missing DMA mapping error in fnic_send_frame()
+c353e8983e0dea5dbba7789033326e1ad34135b7 net: introduce per netns packet chains
+cffd0441872e7f6b1fce5e78fb1c99187a291330 use uniform permission checks for all mount propagation changes
+
+__CHANGES NOT IN UPSTREAM________________
+Porting to Rocky Linux 9, debranding and Rocky branding
+Ensure aarch64 kernel is not compressed
diff --git a/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/15f519e9.failed b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/15f519e9.failed
new file mode 100644
index 0000000000000..c704b7448ca3a
--- /dev/null
+++ b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/15f519e9.failed
@@ -0,0 +1,696 @@
+ceph: fix race condition validating r_parent before applying state
+
+jira LE-4311
+Rebuild_History Non-Buildable kernel-5.14.0-570.49.1.el9_6
+Rebuild_CHGLOG: - ceph: fix client race condition validating r_parent before applying state (Alex Markuze) [RHEL-114962]
+Rebuild_FUZZ: 94.96%
+commit-author Alex Markuze
+commit 15f519e9f883b316d86e2bb6b767a023aafd9d83
+Empty-Commit: Cherry-Pick Conflicts during history rebuild.
+Will be included in final tarball splat. Ref for failed cherry-pick at:
+ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/15f519e9.failed
+
+Add validation to ensure the cached parent directory inode matches the
+directory info in MDS replies. This prevents client-side race conditions
+where concurrent operations (e.g.
rename) cause r_parent to become stale +between request initiation and reply processing, which could lead to +applying state changes to incorrect directory inodes. + +[ idryomov: folded a kerneldoc fixup and a follow-up fix from Alex to + move CEPH_CAP_PIN reference when r_parent is updated: + + When the parent directory lock is not held, req->r_parent can become + stale and is updated to point to the correct inode. However, the + associated CEPH_CAP_PIN reference was not being adjusted. The + CEPH_CAP_PIN is a reference on an inode that is tracked for + accounting purposes. Moving this pin is important to keep the + accounting balanced. When the pin was not moved from the old parent + to the new one, it created two problems: The reference on the old, + stale parent was never released, causing a reference leak. + A reference for the new parent was never acquired, creating the risk + of a reference underflow later in ceph_mdsc_release_request(). This + patch corrects the logic by releasing the pin from the old parent and + acquiring it for the new parent when r_parent is switched. This + ensures reference accounting stays balanced. ] + + Cc: stable@vger.kernel.org + Signed-off-by: Alex Markuze + Reviewed-by: Viacheslav Dubeyko + Signed-off-by: Ilya Dryomov +(cherry picked from commit 15f519e9f883b316d86e2bb6b767a023aafd9d83) + Signed-off-by: Jonathan Maple + +# Conflicts: +# fs/ceph/debugfs.c +# fs/ceph/dir.c +# fs/ceph/file.c +# fs/ceph/inode.c +# fs/ceph/mds_client.c +# fs/ceph/mds_client.h +diff --cc fs/ceph/debugfs.c +index 3904333fa6c3,f3fe786b4143..000000000000 +--- a/fs/ceph/debugfs.c ++++ b/fs/ceph/debugfs.c +@@@ -81,8 -79,8 +79,13 @@@ static int mdsc_show(struct seq_file *s + if (req->r_inode) { + seq_printf(s, " #%llx", ceph_ino(req->r_inode)); + } else if (req->r_dentry) { +++<<<<<<< HEAD + + path = ceph_mdsc_build_path(req->r_dentry, &pathlen, + + &pathbase, 0); +++======= ++ struct ceph_path_info path_info; ++ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + if (IS_ERR(path)) + path = NULL; + spin_lock(&req->r_dentry->d_lock); +@@@ -100,8 -98,8 +103,13 @@@ + } + + if (req->r_old_dentry) { +++<<<<<<< HEAD + + path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen, + + &pathbase, 0); +++======= ++ struct ceph_path_info path_info; ++ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + if (IS_ERR(path)) + path = NULL; + spin_lock(&req->r_old_dentry->d_lock); +diff --cc fs/ceph/dir.c +index e7b61aacd742,32973c62c1a2..000000000000 +--- a/fs/ceph/dir.c ++++ b/fs/ceph/dir.c +@@@ -1224,10 -1271,8 +1224,15 @@@ static void ceph_async_unlink_cb(struc + + /* If op failed, mark everyone involved for errors */ + if (result) { +++<<<<<<< HEAD + + int pathlen = 0; + + u64 base = 0; + + char *path = ceph_mdsc_build_path(dentry, &pathlen, + + &base, 0); +++======= ++ struct ceph_path_info path_info = {0}; ++ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + + /* mark error on parent + clear complete */ + mapping_set_error(req->r_parent->i_mapping, result); +@@@ -1240,9 -1285,9 +1245,15 @@@ + /* mark inode itself for an error (since metadata is bogus) */ + mapping_set_error(req->r_old_inode->i_mapping, result); + +++<<<<<<< HEAD + + pr_warn("async unlink failure 
path=(%llx)%s result=%d!\n", + + base, IS_ERR(path) ? "<>" : path, result); + + ceph_mdsc_free_path(path, pathlen); +++======= ++ pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n", ++ path_info.vino.ino, IS_ERR(path) ? "<>" : path, result); ++ ceph_mdsc_free_path_info(&path_info); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + } + out: + iput(req->r_old_inode); +@@@ -1295,8 -1341,10 +1306,12 @@@ static int ceph_unlink(struct inode *di + struct inode *inode = d_inode(dentry); + struct ceph_mds_request *req; + bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS); + - struct dentry *dn; + int err = -EROFS; + int op; +++<<<<<<< HEAD +++======= ++ char *path; +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + + if (ceph_snap(dir) == CEPH_SNAPDIR) { + /* rmdir .snap/foo is RMSNAP */ +@@@ -1309,6 -1358,31 +1324,34 @@@ + CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; + } else + goto out; +++<<<<<<< HEAD +++======= ++ ++ dn = d_find_alias(dir); ++ if (!dn) { ++ try_async = false; ++ } else { ++ struct ceph_path_info path_info; ++ path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0); ++ if (IS_ERR(path)) { ++ try_async = false; ++ err = 0; ++ } else { ++ err = ceph_mds_check_access(mdsc, path, MAY_WRITE); ++ } ++ ceph_mdsc_free_path_info(&path_info); ++ dput(dn); ++ ++ /* For none EACCES cases will let the MDS do the mds auth check */ ++ if (err == -EACCES) { ++ return err; ++ } else if (err < 0) { ++ try_async = false; ++ err = 0; ++ } ++ } ++ +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + retry: + req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); + if (IS_ERR(req)) { +diff --cc fs/ceph/file.c +index 8bec680bed46,978acd3d4b32..000000000000 +--- a/fs/ceph/file.c ++++ b/fs/ceph/file.c +@@@ -362,9 -366,13 +362,16 @@@ int ceph_open(struct inode *inode, stru + struct ceph_file_info *fi = file->private_data; + int err; + int flags, fmode, wanted; +++<<<<<<< HEAD +++======= ++ struct dentry *dentry; ++ char *path; ++ bool do_sync = false; ++ int mask = MAY_READ; +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + + if (fi) { + - doutc(cl, "file %p is already opened\n", file); + + dout("open file %p is already opened\n", file); + return 0; + } + +@@@ -383,6 -391,32 +390,35 @@@ + fmode = ceph_flags_to_mode(flags); + wanted = ceph_caps_for_mode(fmode); + +++<<<<<<< HEAD +++======= ++ if (fmode & CEPH_FILE_MODE_WR) ++ mask |= MAY_WRITE; ++ dentry = d_find_alias(inode); ++ if (!dentry) { ++ do_sync = true; ++ } else { ++ struct ceph_path_info path_info; ++ path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0); ++ if (IS_ERR(path)) { ++ do_sync = true; ++ err = 0; ++ } else { ++ err = ceph_mds_check_access(mdsc, path, mask); ++ } ++ ceph_mdsc_free_path_info(&path_info); ++ dput(dentry); ++ ++ /* For none EACCES cases will let the MDS do the mds auth check */ ++ if (err == -EACCES) { ++ return err; ++ } else if (err < 0) { ++ do_sync = true; ++ err = 0; ++ } ++ } ++ +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + /* snapped files are read-only */ + if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) + return -EROFS; +@@@ -576,14 -613,13 +612,24 @@@ static void ceph_async_create_cb(struc + mapping_set_error(req->r_parent->i_mapping, result); + + if (result) { +++<<<<<<< HEAD + + int pathlen = 0; + + u64 base = 0; + + char *path = 
ceph_mdsc_build_path(req->r_dentry, &pathlen, + + &base, 0); + + + + pr_warn("async create failure path=(%llx)%s result=%d!\n", + + base, IS_ERR(path) ? "<>" : path, result); + + ceph_mdsc_free_path(path, pathlen); +++======= ++ struct ceph_path_info path_info = {0}; ++ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0); ++ ++ pr_warn_client(cl, ++ "async create failure path=(%llx)%s result=%d!\n", ++ path_info.vino.ino, IS_ERR(path) ? "<>" : path, result); ++ ceph_mdsc_free_path_info(&path_info); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + + ceph_dir_clear_complete(req->r_parent); + if (!d_unhashed(dentry)) +@@@ -743,10 -787,11 +789,14 @@@ int ceph_atomic_open(struct inode *dir + bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS); + int mask; + int err; +++<<<<<<< HEAD +++======= ++ char *path; +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + + - doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n", + - dir, ceph_vinop(dir), dentry, dentry, + - d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); + + dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", + + dir, dentry, dentry, + + d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); + + if (dentry->d_name.len > NAME_MAX) + return -ENAMETOOLONG; +@@@ -760,6 -805,35 +810,38 @@@ + */ + flags &= ~O_TRUNC; + +++<<<<<<< HEAD +++======= ++ dn = d_find_alias(dir); ++ if (!dn) { ++ try_async = false; ++ } else { ++ struct ceph_path_info path_info; ++ path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0); ++ if (IS_ERR(path)) { ++ try_async = false; ++ err = 0; ++ } else { ++ int fmode = ceph_flags_to_mode(flags); ++ ++ mask = MAY_READ; ++ if (fmode & CEPH_FILE_MODE_WR) ++ mask |= MAY_WRITE; ++ err = ceph_mds_check_access(mdsc, path, mask); ++ } ++ ceph_mdsc_free_path_info(&path_info); ++ dput(dn); ++ ++ /* For none EACCES cases will let the MDS do the mds auth check */ ++ if (err == -EACCES) { ++ return err; ++ } else if (err < 0) { ++ try_async = false; ++ err = 0; ++ } ++ } ++ +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + retry: + if (flags & O_CREAT) { + if (ceph_quota_is_max_files_exceeded(dir)) +diff --cc fs/ceph/inode.c +index 34cfeb0fba9e,8ac89ce6435c..000000000000 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@@ -2437,6 -2485,33 +2437,36 @@@ int __ceph_setattr(struct mnt_idmap *id + bool lock_snap_rwsem = false; + bool fill_fscrypt; + int truncate_retry = 20; /* The RMW will take around 50ms */ +++<<<<<<< HEAD +++======= ++ struct dentry *dentry; ++ char *path; ++ bool do_sync = false; ++ ++ dentry = d_find_alias(inode); ++ if (!dentry) { ++ do_sync = true; ++ } else { ++ struct ceph_path_info path_info; ++ path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0); ++ if (IS_ERR(path)) { ++ do_sync = true; ++ err = 0; ++ } else { ++ err = ceph_mds_check_access(mdsc, path, MAY_WRITE); ++ } ++ ceph_mdsc_free_path_info(&path_info); ++ dput(dentry); ++ ++ /* For none EACCES cases will let the MDS do the mds auth check */ ++ if (err == -EACCES) { ++ return err; ++ } else if (err < 0) { ++ do_sync = true; ++ err = 0; ++ } ++ } +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + + retry: + prealloc_cf = ceph_alloc_cap_flush(); +diff --cc fs/ceph/mds_client.c +index 92bcf1cd8c16,3bc72b47fe4d..000000000000 +--- a/fs/ceph/mds_client.c ++++ b/fs/ceph/mds_client.c +@@@ -2630,9 -2679,9 +2630,8 @@@ static u8 
*get_fscrypt_altname(const st + + /** + * ceph_mdsc_build_path - build a path string to a given dentry + - * @mdsc: mds client + * @dentry: dentry to which path should be built +- * @plen: returned length of string +- * @pbase: returned base inode number ++ * @path_info: output path, length, base ino+snap, and freepath ownership flag + * @for_wire: is this path going to be sent to the MDS? + * + * Build a string that represents the path to the dentry. This is mostly called +@@@ -2649,9 -2698,10 +2648,14 @@@ + * Encode hidden .snap dirs as a double /, i.e. + * foo/.snap/bar -> foo//bar + */ +++<<<<<<< HEAD + +char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase, + + int for_wire) +++======= ++ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry, ++ struct ceph_path_info *path_info, int for_wire) +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + { + - struct ceph_client *cl = mdsc->fsc->client; + struct dentry *cur; + struct inode *inode; + char *path; +@@@ -2753,24 -2802,35 +2757,49 @@@ retry + + if (pos < 0) { + /* + - * The path is longer than PATH_MAX and this function + - * cannot ever succeed. Creating paths that long is + - * possible with Ceph, but Linux cannot use them. + + * A rename didn't occur, but somehow we didn't end up where + + * we thought we would. Throw a warning and try again. + */ + - return ERR_PTR(-ENAMETOOLONG); + + pr_warn("build_path did not end path lookup where expected (pos = %d)\n", + + pos); + + goto retry; + } + +++<<<<<<< HEAD + + *pbase = base; + + *plen = PATH_MAX - 1 - pos; + + dout("build_path on %p %d built %llx '%.*s'\n", + + dentry, d_count(dentry), base, *plen, path + pos); + + return path + pos; + +} + + + +static int build_dentry_path(struct dentry *dentry, struct inode *dir, + + const char **ppath, int *ppathlen, u64 *pino, + + bool *pfreepath, bool parent_locked) +++======= ++ /* Initialize the output structure */ ++ memset(path_info, 0, sizeof(*path_info)); ++ ++ path_info->vino.ino = base; ++ path_info->pathlen = PATH_MAX - 1 - pos; ++ path_info->path = path + pos; ++ path_info->freepath = true; ++ ++ /* Set snap from dentry if available */ ++ if (d_inode(dentry)) ++ path_info->vino.snap = ceph_snap(d_inode(dentry)); ++ else ++ path_info->vino.snap = CEPH_NOSNAP; ++ ++ doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry), ++ base, PATH_MAX - 1 - pos, path + pos); ++ return path + pos; ++ } ++ ++ static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry, ++ struct inode *dir, struct ceph_path_info *path_info, ++ bool parent_locked) +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + { + char *path; + +@@@ -2786,18 -2848,18 +2817,21 @@@ + return 0; + } + rcu_read_unlock(); +++<<<<<<< HEAD + + path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); +++======= ++ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + if (IS_ERR(path)) + return PTR_ERR(path); +- *ppath = path; +- *pfreepath = true; ++ /* ++ * ceph_mdsc_build_path already fills path_info, including snap handling. 
++ */ + return 0; + } + +- static int build_inode_path(struct inode *inode, +- const char **ppath, int *ppathlen, u64 *pino, +- bool *pfreepath) ++ static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info) + { + - struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); + struct dentry *dentry; + char *path; + +@@@ -2807,7 -2871,7 +2843,11 @@@ + return 0; + } + dentry = d_find_alias(inode); +++<<<<<<< HEAD + + path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); +++======= ++ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + dput(dentry); + if (IS_ERR(path)) + return PTR_ERR(path); +@@@ -2820,27 -2887,34 +2863,56 @@@ + * request arguments may be specified via an inode *, a dentry *, or + * an explicit ino+path. + */ +++<<<<<<< HEAD + +static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, + + struct inode *rdiri, const char *rpath, + + u64 rino, const char **ppath, int *pathlen, + + u64 *ino, bool *freepath, bool parent_locked) +++======= ++ static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode, ++ struct dentry *rdentry, struct inode *rdiri, ++ const char *rpath, u64 rino, ++ struct ceph_path_info *path_info, ++ bool parent_locked) +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + { + - struct ceph_client *cl = mdsc->fsc->client; + int r = 0; + ++ /* Initialize the output structure */ ++ memset(path_info, 0, sizeof(*path_info)); ++ + if (rinode) { +++<<<<<<< HEAD + + r = build_inode_path(rinode, ppath, pathlen, ino, freepath); + + dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), + + ceph_snap(rinode)); + + } else if (rdentry) { + + r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, + + freepath, parent_locked); + + dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, + + *ppath); + + } else if (rpath || rino) { + + *ino = rino; + + *ppath = rpath; + + *pathlen = rpath ? strlen(rpath) : 0; + + dout(" path %.*s\n", *pathlen, rpath); +++======= ++ r = build_inode_path(rinode, path_info); ++ doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode), ++ ceph_snap(rinode)); ++ } else if (rdentry) { ++ r = build_dentry_path(mdsc, rdentry, rdiri, path_info, parent_locked); ++ doutc(cl, " dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino, ++ path_info->pathlen, path_info->path); ++ } else if (rpath || rino) { ++ path_info->vino.ino = rino; ++ path_info->vino.snap = CEPH_NOSNAP; ++ path_info->path = rpath; ++ path_info->pathlen = rpath ? 
strlen(rpath) : 0; ++ path_info->freepath = false; ++ ++ doutc(cl, " path %.*s\n", path_info->pathlen, rpath); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + } + + return r; +@@@ -2891,25 -2988,25 +2963,42 @@@ static struct ceph_msg *create_request_ + { + int mds = session->s_mds; + struct ceph_mds_client *mdsc = session->s_mdsc; + - struct ceph_client *cl = mdsc->fsc->client; + struct ceph_msg *msg; +++<<<<<<< HEAD + + struct ceph_mds_request_head_old *head; + + const char *path1 = NULL; + + const char *path2 = NULL; + + u64 ino1 = 0, ino2 = 0; + + int pathlen1 = 0, pathlen2 = 0; + + bool freepath1 = false, freepath2 = false; +++======= ++ struct ceph_mds_request_head_legacy *lhead; ++ struct ceph_path_info path_info1 = {0}; ++ struct ceph_path_info path_info2 = {0}; +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + struct dentry *old_dentry = NULL; + int len; + u16 releases; + void *p, *end; + int ret; + bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME); +++<<<<<<< HEAD + + + + ret = set_request_path_attr(req->r_inode, req->r_dentry, + + req->r_parent, req->r_path1, req->r_ino1.ino, + + &path1, &pathlen1, &ino1, &freepath1, + + test_bit(CEPH_MDS_R_PARENT_LOCKED, + + &req->r_req_flags)); +++======= ++ u16 request_head_version = mds_supported_head_version(session); ++ kuid_t caller_fsuid = req->r_cred->fsuid; ++ kgid_t caller_fsgid = req->r_cred->fsgid; ++ bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); ++ ++ ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry, ++ req->r_parent, req->r_path1, req->r_ino1.ino, ++ &path_info1, parent_locked); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + if (ret < 0) { + msg = ERR_PTR(ret); + goto out; +@@@ -2919,10 -3041,10 +3033,17 @@@ + if (req->r_old_dentry && + !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED)) + old_dentry = req->r_old_dentry; +++<<<<<<< HEAD + + ret = set_request_path_attr(NULL, old_dentry, + + req->r_old_dentry_dir, + + req->r_path2, req->r_ino2.ino, + + &path2, &pathlen2, &ino2, &freepath2, true); +++======= ++ ret = set_request_path_attr(mdsc, NULL, old_dentry, ++ req->r_old_dentry_dir, ++ req->r_path2, req->r_ino2.ino, ++ &path_info2, true); +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + if (ret < 0) { + msg = ERR_PTR(ret); + goto out_free1; +@@@ -2999,17 -3189,17 +3120,17 @@@ + + end = msg->front.iov_base + msg->front.iov_len; + + - lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); + - lhead->op = cpu_to_le32(req->r_op); + - lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, + - caller_fsuid)); + - lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, + - caller_fsgid)); + - lhead->ino = cpu_to_le64(req->r_deleg_ino); + - lhead->args = req->r_args; + + head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); + + head->op = cpu_to_le32(req->r_op); + + head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, + + req->r_cred->fsuid)); + + head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, + + req->r_cred->fsgid)); + + head->ino = cpu_to_le64(req->r_deleg_ino); + + head->args = req->r_args; + +- ceph_encode_filepath(&p, end, ino1, path1); +- ceph_encode_filepath(&p, end, ino2, path2); ++ ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path); ++ ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path); + + /* make note of release offset, in 
case we need to replay */ + req->r_request_release_offset = p - msg->front.iov_base; +@@@ -4331,7 -4628,7 +4449,11 @@@ static int reconnect_caps_cb(struct ino + dentry = d_find_primary(inode); + if (dentry) { + /* set pathbase to parent dir when msg_version >= 2 */ +++<<<<<<< HEAD + + path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, +++======= ++ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info, +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + recon_state->msg_version >= 2); + dput(dentry); + if (IS_ERR(path)) { +@@@ -4381,10 -4677,12 +4500,10 @@@ + rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); + rec.v1.issued = cpu_to_le32(cap->issued); + rec.v1.size = cpu_to_le64(i_size_read(inode)); + - ts = inode_get_mtime(inode); + - ceph_encode_timespec64(&rec.v1.mtime, &ts); + - ts = inode_get_atime(inode); + - ceph_encode_timespec64(&rec.v1.atime, &ts); + + ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime); + + ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime); + rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); +- rec.v1.pathbase = cpu_to_le64(pathbase); ++ rec.v1.pathbase = cpu_to_le64(path_info.vino.ino); + } + + if (list_empty(&ci->i_cap_snaps)) { +diff --cc fs/ceph/mds_client.h +index b45ce3fa8790,0428a5eaf28c..000000000000 +--- a/fs/ceph/mds_client.h ++++ b/fs/ceph/mds_client.h +@@@ -575,15 -612,29 +575,30 @@@ extern void ceph_queue_cap_unlink_work( + extern int ceph_iterate_session_caps(struct ceph_mds_session *session, + int (*cb)(struct inode *, int mds, void *), + void *arg); + -extern int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, + - int mask); + - + extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc); + +- static inline void ceph_mdsc_free_path(char *path, int len) ++ /* ++ * Structure to group path-related output parameters for build_*_path functions ++ */ ++ struct ceph_path_info { ++ const char *path; ++ int pathlen; ++ struct ceph_vino vino; ++ bool freepath; ++ }; ++ ++ static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info) + { +- if (!IS_ERR_OR_NULL(path)) +- __putname(path - (PATH_MAX - 1 - len)); ++ if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path)) ++ __putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen)); + } + +++<<<<<<< HEAD + +extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, +++======= ++ extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, ++ struct dentry *dentry, struct ceph_path_info *path_info, +++>>>>>>> 15f519e9f883 (ceph: fix race condition validating r_parent before applying state) + int for_wire); + + extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry); +* Unmerged path fs/ceph/debugfs.c +* Unmerged path fs/ceph/dir.c +* Unmerged path fs/ceph/file.c +* Unmerged path fs/ceph/inode.c +* Unmerged path fs/ceph/mds_client.c +* Unmerged path fs/ceph/mds_client.h diff --git a/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/690e47d1.failed b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/690e47d1.failed new file mode 100644 index 0000000000000..5c811f5374d80 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/690e47d1.failed @@ -0,0 +1,244 @@ +sched/rt: Fix race in push_rt_task + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.49.1.el9_6 +commit-author Harshit Agarwal +commit 690e47d1403e90b7f2366f03b52ed3304194c793 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. 
+Will be included in final tarball splat. Ref for failed cherry-pick at:
+ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/690e47d1.failed
+
+Overview
+========
+When a CPU chooses to call push_rt_task and picks a task to push to
+another CPU's runqueue, then it will call the find_lock_lowest_rq method,
+which would take a double lock on both CPUs' runqueues. If one of the
+locks isn't readily available, it may lead to dropping the current
+runqueue lock and reacquiring both locks at once. During this window
+it is possible that the task is already migrated and is running on some
+other CPU. These cases are already handled. However, if the task is
+migrated and has already been executed and another CPU is now trying to
+wake it up (ttwu) such that it is queued again on the runqueue
+(on_rq is 1) and also if the task was run by the same CPU, then the
+current checks will pass even though the task was migrated out and is no
+longer in the pushable tasks list.
+
+Crashes
+=======
+This bug resulted in quite a few flavors of crashes triggering kernel
+panics with various crash signatures such as assert failures, page
+faults, null pointer dereferences, and queue corruption errors, all
+coming from the scheduler itself.
+
+Some of the crashes:
+-> kernel BUG at kernel/sched/rt.c:1616! BUG_ON(idx >= MAX_RT_PRIO)
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? die+0x2a/0x50
+ ? do_trap+0x85/0x100
+ ? pick_next_task_rt+0x6e/0x1d0
+ ? do_error_trap+0x64/0xa0
+ ? pick_next_task_rt+0x6e/0x1d0
+ ? exc_invalid_op+0x4c/0x60
+ ? pick_next_task_rt+0x6e/0x1d0
+ ? asm_exc_invalid_op+0x12/0x20
+ ? pick_next_task_rt+0x6e/0x1d0
+ __schedule+0x5cb/0x790
+ ? update_ts_time_stats+0x55/0x70
+ schedule_idle+0x1e/0x40
+ do_idle+0x15e/0x200
+ cpu_startup_entry+0x19/0x20
+ start_secondary+0x117/0x160
+ secondary_startup_64_no_verify+0xb0/0xbb
+
+-> BUG: kernel NULL pointer dereference, address: 00000000000000c0
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? no_context+0x183/0x350
+ ? __warn+0x8a/0xe0
+ ? exc_page_fault+0x3d6/0x520
+ ? asm_exc_page_fault+0x1e/0x30
+ ? pick_next_task_rt+0xb5/0x1d0
+ ? pick_next_task_rt+0x8c/0x1d0
+ __schedule+0x583/0x7e0
+ ? update_ts_time_stats+0x55/0x70
+ schedule_idle+0x1e/0x40
+ do_idle+0x15e/0x200
+ cpu_startup_entry+0x19/0x20
+ start_secondary+0x117/0x160
+ secondary_startup_64_no_verify+0xb0/0xbb
+
+-> BUG: unable to handle page fault for address: ffff9464daea5900
+ kernel BUG at kernel/sched/rt.c:1861! BUG_ON(rq->cpu != task_cpu(p))
+
+-> kernel BUG at kernel/sched/rt.c:1055! BUG_ON(!rq->nr_running)
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? die+0x2a/0x50
+ ? do_trap+0x85/0x100
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ ? do_error_trap+0x64/0xa0
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ ? exc_invalid_op+0x4c/0x60
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ ? asm_exc_invalid_op+0x12/0x20
+ ? dequeue_top_rt_rq+0xa2/0xb0
+ dequeue_rt_entity+0x1f/0x70
+ dequeue_task_rt+0x2d/0x70
+ __schedule+0x1a8/0x7e0
+ ? blk_finish_plug+0x25/0x40
+ schedule+0x3c/0xb0
+ futex_wait_queue_me+0xb6/0x120
+ futex_wait+0xd9/0x240
+ do_futex+0x344/0xa90
+ ? get_mm_exe_file+0x30/0x60
+ ? audit_exe_compare+0x58/0x70
+ ? audit_filter_rules.constprop.26+0x65e/0x1220
+ __x64_sys_futex+0x148/0x1f0
+ do_syscall_64+0x30/0x80
+ entry_SYSCALL_64_after_hwframe+0x62/0xc7
+
+-> BUG: unable to handle page fault for address: ffff8cf3608bc2c0
+ Call Trace:
+ ? __die_body+0x1a/0x60
+ ? no_context+0x183/0x350
+ ? spurious_kernel_fault+0x171/0x1c0
+ ? exc_page_fault+0x3b6/0x520
+ ? plist_check_list+0x15/0x40
+ ? plist_check_list+0x2e/0x40
+ ? asm_exc_page_fault+0x1e/0x30
+ ? _cond_resched+0x15/0x30
+ ? futex_wait_queue_me+0xc8/0x120
+ ? futex_wait+0xd9/0x240
+ ? try_to_wake_up+0x1b8/0x490
+ ? futex_wake+0x78/0x160
+ ? do_futex+0xcd/0xa90
+ ? plist_check_list+0x15/0x40
+ ? plist_check_list+0x2e/0x40
+ ? plist_del+0x6a/0xd0
+ ? plist_check_list+0x15/0x40
+ ? plist_check_list+0x2e/0x40
+ ? dequeue_pushable_task+0x20/0x70
+ ? __schedule+0x382/0x7e0
+ ? asm_sysvec_reschedule_ipi+0xa/0x20
+ ? schedule+0x3c/0xb0
+ ? exit_to_user_mode_prepare+0x9e/0x150
+ ? irqentry_exit_to_user_mode+0x5/0x30
+ ? asm_sysvec_reschedule_ipi+0x12/0x20
+
+Above are some of the common examples of the crashes that were observed
+due to this issue.
+
+Details
+=======
+Let's look at the following scenario to understand this race.
+
+1) CPU A enters push_rt_task
+  a) CPU A has chosen next_task = task p.
+  b) CPU A calls find_lock_lowest_rq(Task p, CPU Z’s rq).
+  c) CPU A identifies CPU X as a destination CPU (X < Z).
+  d) CPU A enters double_lock_balance(CPU Z’s rq, CPU X’s rq).
+  e) Since X is lower than Z, CPU A unlocks CPU Z’s rq. Someone else has
+     locked CPU X’s rq, and thus, CPU A must wait.
+
+2) At CPU Z
+  a) Previous task has completed execution and thus, CPU Z enters
+     schedule, locks its own rq after CPU A releases it.
+  b) CPU Z dequeues previous task and begins executing task p.
+  c) CPU Z unlocks its rq.
+  d) Task p yields the CPU (e.g. by doing IO or waiting to acquire a
+     lock) which triggers the schedule function on CPU Z.
+  e) CPU Z enters schedule again, locks its own rq, and dequeues task p.
+  f) As part of dequeue, it sets p.on_rq = 0 and unlocks its rq.
+
+3) At CPU B
+  a) CPU B enters try_to_wake_up with input task p.
+  b) Since CPU Z dequeued task p, p.on_rq = 0, and CPU B updates
+     p.state = WAKING.
+  c) CPU B via select_task_rq determines CPU Y as the target CPU.
+
+4) The race
+  a) CPU A acquires CPU X’s lock and relocks CPU Z.
+  b) CPU A reads task p.cpu = Z and incorrectly concludes task p is
+     still on CPU Z.
+  c) CPU A failed to notice task p had been dequeued from CPU Z while
+     CPU A was waiting for locks in double_lock_balance. If CPU A knew
+     that task p had been dequeued, it would return NULL forcing
+     push_rt_task to give up the task p's migration.
+  d) CPU B updates task p.cpu = Y and calls ttwu_queue.
+  e) CPU B locks Y’s rq. CPU B enqueues task p onto Y and sets task
+     p.on_rq = 1.
+  f) CPU B unlocks CPU Y, triggering memory synchronization.
+  g) CPU A reads task p.on_rq = 1, cementing its assumption that task p
+     has not migrated.
+  h) CPU A decides to migrate p to CPU X.
+
+This leads to A dequeuing p from Y's queue and various crashes down the
+line.
+
+Solution
+========
+The solution here is fairly simple. After obtaining the lock (at 4a),
+the check is enhanced to make sure that the task is still at the head of
+the pushable tasks list. If not, then it is anyway not suitable for
+being pushed out.
+
+Testing
+=======
+The fix is tested on a cluster of 3 nodes, where the panics due to this
+are hit every couple of days. A fix similar to this was deployed on such
+a cluster and was stable for more than 30 days.
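A condensed C sketch of the fix described above, for illustration only (this is not the verbatim upstream hunk recorded in the .failed file; surrounding logic is abridged from the upstream commit): after double_lock_balance() may have dropped and retaken rq->lock, find_lock_lowest_rq() re-validates that the candidate task is still the head of this runqueue's pushable list, instead of trusting task_cpu() and on_rq alone.

static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries, cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);
		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
			/* Target is no longer lower priority; give up. */
			lowest_rq = NULL;
			break;
		}

		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * rq->lock was dropped while waiting for the second
			 * lock. The task may have been dequeued, woken on
			 * another CPU, and re-enqueued there, yet task_cpu()
			 * and on_rq can still look plausible (step 4 above).
			 * Requiring the task to still be the next pushable
			 * task on this runqueue closes that race.
			 */
			if (unlikely(is_migration_disabled(task) ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_mask) ||
				     task != pick_next_pushable_task(rq))) {
				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable, use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* Priorities changed under us; try again. */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}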
+ +Co-developed-by: Jon Kohler + Signed-off-by: Jon Kohler +Co-developed-by: Gauri Patwardhan + Signed-off-by: Gauri Patwardhan +Co-developed-by: Rahul Chunduru + Signed-off-by: Rahul Chunduru + Signed-off-by: Harshit Agarwal + Signed-off-by: Peter Zijlstra (Intel) + Reviewed-by: "Steven Rostedt (Google)" + Reviewed-by: Phil Auld + Tested-by: Will Ton + Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20250225180553.167995-1-harshit@nutanix.com +(cherry picked from commit 690e47d1403e90b7f2366f03b52ed3304194c793) + Signed-off-by: Jonathan Maple + +# Conflicts: +# kernel/sched/rt.c +diff --cc kernel/sched/rt.c +index e6eea6e98e74,e40422c37033..000000000000 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@@ -1974,26 -1974,6 +1993,29 @@@ static struct rq *find_lock_lowest_rq(s + return lowest_rq; + } + +++<<<<<<< HEAD + +static struct task_struct *pick_next_pushable_task(struct rq *rq) + +{ + + struct task_struct *p; + + + + if (!has_pushable_tasks(rq)) + + return NULL; + + + + p = plist_first_entry(&rq->rt.pushable_tasks, + + struct task_struct, pushable_tasks); + + + + BUG_ON(rq->cpu != task_cpu(p)); + + BUG_ON(task_current(rq, p)); + + BUG_ON(p->nr_cpus_allowed <= 1); + + + + BUG_ON(!task_on_rq_queued(p)); + + BUG_ON(!rt_task(p)); + + + + return p; + +} + + +++======= +++>>>>>>> 690e47d1403e (sched/rt: Fix race in push_rt_task) + /* + * If the current CPU has more than one RT task, see if the non + * running task can migrate over to a CPU that is running a task +* Unmerged path kernel/sched/rt.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/91463946.failed b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/91463946.failed new file mode 100644 index 0000000000000..c9921dc376816 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/91463946.failed @@ -0,0 +1,202 @@ +ice: Add in/out PTP pin delays + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.49.1.el9_6 +commit-author Karol Kolacinski +commit 914639464b760a4ec659a46cc2de9a2fdc4eff5a +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/91463946.failed + +HW can have different input/output delays for each of the pins. + +Currently, only E82X adapters have delay compensation based on TSPLL +config and E810 adapters have constant 1 ms compensation, both cases +only for output delays and the same one for all pins. + +E825 adapters have different delays for SDP and other pins. Those +delays are also based on direction and input delays are different than +output ones. This is the main reason for moving delays to pin +description structure. + +Add a field in ice_ptp_pin_desc structure to reflect that. Delay values +are based on approximate calculations of HW delays based on HW spec. + +Implement external timestamp (input) delay compensation. + +Remove existing definitions and wrappers for periodic output propagation +delays. 
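To make the data-structure change concrete, a minimal sketch follows. The struct members mirror the ice_ptp.h hunk below; the example pin entries and delay values are hypothetical, not the driver's real calibration data. Each pin now carries its own input/output compensation in nanoseconds instead of a single per-device output propagation delay.

/* Members as added by this commit (see the ice_ptp.h hunk below). */
struct ice_ptp_pin_desc {
	int name_idx;		/* index of the pin's name in ice_pin_names */
	int gpio[2];		/* associated input [0] / output [1] GPIO */
	unsigned int delay[2];	/* input [0] / output [1] delay, in ns */
};

/* Hypothetical table: an SDP pin with asymmetric input/output delays and
 * an output-only 1PPS-style pin. Values are illustrative only.
 */
static const struct ice_ptp_pin_desc example_pins[] = {
	{ .name_idx = 0, .gpio = {  0, 0 }, .delay = { 15, 11 } },
	{ .name_idx = 1, .gpio = { -1, 5 }, .delay = {  0, 12 } },
};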
+ + Reviewed-by: Przemek Kitszel + Signed-off-by: Karol Kolacinski + Reviewed-by: Simon Horman + Tested-by: Sunitha Mekala (A Contingent worker at Intel) + Signed-off-by: Tony Nguyen +(cherry picked from commit 914639464b760a4ec659a46cc2de9a2fdc4eff5a) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/net/ethernet/intel/ice/ice_ptp.c +diff --cc drivers/net/ethernet/intel/ice/ice_ptp.c +index 360f8397a8c0,3eef0fea0c80..000000000000 +--- a/drivers/net/ethernet/intel/ice/ice_ptp.c ++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c +@@@ -1832,11 -1851,10 +1844,16 @@@ static int ice_ptp_cfg_perout(struct ic + div64_u64_rem(start, period, &phase); + + /* If we have only phase or start time is in the past, start the timer + - * at the next multiple of period, maintaining phase. + + * at the next multiple of period, maintaining phase at least 0.5 second + + * from now, so we have time to write it to HW. + */ +++<<<<<<< HEAD + + clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500; + + if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw)) +++======= ++ clk = ice_ptp_read_src_clk_reg(pf, NULL); ++ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) +++>>>>>>> 914639464b76 (ice: Add in/out PTP pin delays) + start = div64_u64(clk + period - 1, period) * period + phase; + + /* Compensate for propagation delay from the generator to the pin. */ +* Unmerged path drivers/net/ethernet/intel/ice/ice_ptp.c +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h +index c490d98fd9c6..a1d0e988c084 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ptp.h ++++ b/drivers/net/ethernet/intel/ice/ice_ptp.h +@@ -211,6 +211,7 @@ enum ice_ptp_pin_nvm { + * struct ice_ptp_pin_desc - hardware pin description data + * @name_idx: index of the name of pin in ice_pin_names + * @gpio: the associated GPIO input and output pins ++ * @delay: input and output signal delays in nanoseconds + * + * Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array + * for the device. 
Device families have separate sets of available pins with +@@ -219,6 +220,7 @@ enum ice_ptp_pin_nvm { + struct ice_ptp_pin_desc { + int name_idx; + int gpio[2]; ++ unsigned int delay[2]; + }; + + /** +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +index bee674b43e13..fe362e3faff1 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h ++++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +@@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { + 823437500, /* 823.4375 MHz PLL */ + /* nominal_incval */ + 0x136e44fabULL, +- /* pps_delay */ +- 11, + }, + + /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ +@@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { + 783360000, /* 783.36 MHz */ + /* nominal_incval */ + 0x146cc2177ULL, +- /* pps_delay */ +- 12, + }, + + /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ +@@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { + 796875000, /* 796.875 MHz */ + /* nominal_incval */ + 0x141414141ULL, +- /* pps_delay */ +- 12, + }, + + /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ +@@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { + 816000000, /* 816 MHz */ + /* nominal_incval */ + 0x139b9b9baULL, +- /* pps_delay */ +- 12, + }, + + /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ +@@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { + 830078125, /* 830.78125 MHz */ + /* nominal_incval */ + 0x134679aceULL, +- /* pps_delay */ +- 11, + }, + + /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ +@@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { + 783360000, /* 783.36 MHz */ + /* nominal_incval */ + 0x146cc2177ULL, +- /* pps_delay */ +- 12, + }, + }; + +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +index ea3b8dc933a5..2e1dd3f258fd 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h ++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +@@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g { + * struct ice_time_ref_info_e82x + * @pll_freq: Frequency of PLL that drives timer ticks in Hz + * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L +- * @pps_delay: propagation delay of the PPS output signal + * + * Characteristic information for the various TIME_REF sources possible in the + * E822 devices +@@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g { + struct ice_time_ref_info_e82x { + u64 pll_freq; + u64 nominal_incval; +- u8 pps_delay; + }; + + /** +@@ -326,8 +324,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; + */ + #define ICE_E810_PLL_FREQ 812500000 + #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +-#define ICE_E810_OUT_PROP_DELAY_NS 1 +-#define ICE_E825C_OUT_PROP_DELAY_NS 11 + + /* Device agnostic functions */ + u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); +@@ -389,11 +385,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) + return e82x_time_ref[time_ref].nominal_incval; + } + +-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) +-{ +- return e82x_time_ref[time_ref].pps_delay; +-} +- + /* E822 Vernier calibration functions */ + int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); + int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); +@@ -434,20 +425,6 @@ int 
ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); + #define ICE_ETH56G_NOMINAL_THRESH4 0x7777 + #define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 + +-static inline u64 ice_prop_delay(const struct ice_hw *hw) +-{ +- switch (hw->ptp.phy_model) { +- case ICE_PHY_ETH56G: +- return ICE_E825C_OUT_PROP_DELAY_NS; +- case ICE_PHY_E810: +- return ICE_E810_OUT_PROP_DELAY_NS; +- case ICE_PHY_E82X: +- return ice_e82x_pps_delay(ice_e82x_time_ref(hw)); +- default: +- return 0; +- } +-} +- + /** + * ice_get_base_incval - Get base clock increment value + * @hw: pointer to the HW struct diff --git a/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/a61a3e96.failed b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/a61a3e96.failed new file mode 100644 index 0000000000000..706b91e0283a8 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/a61a3e96.failed @@ -0,0 +1,810 @@ +selftests: tls: add tests for zero-length records + +jira LE-4311 +cve CVE-2025-39682 +Rebuild_History Non-Buildable kernel-5.14.0-570.49.1.el9_6 +commit-author Jakub Kicinski +commit a61a3e961baff65b0a49f862fe21ce304f279b24 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/a61a3e96.failed + +Test various combinations of zero-length records. +Unfortunately, kernel cannot be coerced into producing those, +so hardcode the ciphertext messages in the test. + + Reviewed-by: Sabrina Dubroca +Link: https://patch.msgid.link/20250820021952.143068-2-kuba@kernel.org + Signed-off-by: Jakub Kicinski +(cherry picked from commit a61a3e961baff65b0a49f862fe21ce304f279b24) + Signed-off-by: Jonathan Maple + +# Conflicts: +# tools/testing/selftests/net/tls.c +diff --cc tools/testing/selftests/net/tls.c +index 6f573be09e01,0f5640d8dc7f..000000000000 +--- a/tools/testing/selftests/net/tls.c ++++ b/tools/testing/selftests/net/tls.c +@@@ -1599,6 -1682,778 +1611,781 @@@ TEST_F(tls, recv_efault + EXPECT_EQ(memcmp(rec2, recv_mem + 9, ret - 9), 0); + } + +++<<<<<<< HEAD +++======= ++ #define TLS_RECORD_TYPE_HANDSHAKE 0x16 ++ /* key_update, length 1, update_not_requested */ ++ static const char key_update_msg[] = "\x18\x00\x00\x01\x00"; ++ static void tls_send_keyupdate(struct __test_metadata *_metadata, int fd) ++ { ++ size_t len = sizeof(key_update_msg); ++ ++ EXPECT_EQ(tls_send_cmsg(fd, TLS_RECORD_TYPE_HANDSHAKE, ++ (char *)key_update_msg, len, 0), ++ len); ++ } ++ ++ static void tls_recv_keyupdate(struct __test_metadata *_metadata, int fd, int flags) ++ { ++ char buf[100]; ++ ++ EXPECT_EQ(tls_recv_cmsg(_metadata, fd, TLS_RECORD_TYPE_HANDSHAKE, buf, sizeof(buf), flags), ++ sizeof(key_update_msg)); ++ EXPECT_EQ(memcmp(buf, key_update_msg, sizeof(key_update_msg)), 0); ++ } ++ ++ /* set the key to 0 then 1 for RX, immediately to 1 for TX */ ++ TEST_F(tls_basic, rekey_rx) ++ { ++ struct tls_crypto_info_keys tls12_0, tls12_1; ++ char const *test_str = "test_message"; ++ int send_len = strlen(test_str) + 1; ++ char buf[20]; ++ int ret; ++ ++ if (self->notls) ++ return; ++ ++ tls_crypto_info_init(TLS_1_3_VERSION, TLS_CIPHER_AES_GCM_128, ++ &tls12_0, 0); ++ tls_crypto_info_init(TLS_1_3_VERSION, TLS_CIPHER_AES_GCM_128, ++ &tls12_1, 1); ++ ++ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12_1, tls12_1.len); ++ ASSERT_EQ(ret, 0); ++ ++ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12_0, tls12_0.len); ++ ASSERT_EQ(ret, 0); ++ ++ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12_1, tls12_1.len); ++ EXPECT_EQ(ret, 0); ++ ++ 
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); ++ EXPECT_EQ(memcmp(buf, test_str, send_len), 0); ++ } ++ ++ /* set the key to 0 then 1 for TX, immediately to 1 for RX */ ++ TEST_F(tls_basic, rekey_tx) ++ { ++ struct tls_crypto_info_keys tls12_0, tls12_1; ++ char const *test_str = "test_message"; ++ int send_len = strlen(test_str) + 1; ++ char buf[20]; ++ int ret; ++ ++ if (self->notls) ++ return; ++ ++ tls_crypto_info_init(TLS_1_3_VERSION, TLS_CIPHER_AES_GCM_128, ++ &tls12_0, 0); ++ tls_crypto_info_init(TLS_1_3_VERSION, TLS_CIPHER_AES_GCM_128, ++ &tls12_1, 1); ++ ++ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12_0, tls12_0.len); ++ ASSERT_EQ(ret, 0); ++ ++ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12_1, tls12_1.len); ++ ASSERT_EQ(ret, 0); ++ ++ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12_1, tls12_1.len); ++ EXPECT_EQ(ret, 0); ++ ++ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); ++ EXPECT_EQ(memcmp(buf, test_str, send_len), 0); ++ } ++ ++ TEST_F(tls_basic, disconnect) ++ { ++ char const *test_str = "test_message"; ++ int send_len = strlen(test_str) + 1; ++ struct tls_crypto_info_keys key; ++ struct sockaddr_in addr; ++ char buf[20]; ++ int ret; ++ ++ if (self->notls) ++ return; ++ ++ tls_crypto_info_init(TLS_1_3_VERSION, TLS_CIPHER_AES_GCM_128, ++ &key, 0); ++ ++ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &key, key.len); ++ ASSERT_EQ(ret, 0); ++ ++ /* Pre-queue the data so that setsockopt parses it but doesn't ++ * dequeue it from the TCP socket. recvmsg would dequeue. ++ */ ++ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); ++ ++ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &key, key.len); ++ ASSERT_EQ(ret, 0); ++ ++ addr.sin_family = AF_UNSPEC; ++ addr.sin_addr.s_addr = htonl(INADDR_ANY); ++ addr.sin_port = 0; ++ ret = connect(self->cfd, &addr, sizeof(addr)); ++ EXPECT_EQ(ret, -1); ++ EXPECT_EQ(errno, EOPNOTSUPP); ++ ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); ++ } ++ ++ TEST_F(tls, rekey) ++ { ++ char const *test_str_1 = "test_message_before_rekey"; ++ char const *test_str_2 = "test_message_after_rekey"; ++ struct tls_crypto_info_keys tls12; ++ int send_len; ++ char buf[100]; ++ ++ if (variant->tls_version != TLS_1_3_VERSION) ++ return; ++ ++ /* initial send/recv */ ++ send_len = strlen(test_str_1) + 1; ++ EXPECT_EQ(send(self->fd, test_str_1, send_len, 0), send_len); ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); ++ EXPECT_EQ(memcmp(buf, test_str_1, send_len), 0); ++ ++ /* update TX key */ ++ tls_send_keyupdate(_metadata, self->fd); ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ /* send after rekey */ ++ send_len = strlen(test_str_2) + 1; ++ EXPECT_EQ(send(self->fd, test_str_2, send_len, 0), send_len); ++ ++ /* can't receive the KeyUpdate without a control message */ ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); ++ ++ /* get KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, 0); ++ ++ /* recv blocking -> -EKEYEXPIRED */ ++ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), 0), -1); ++ EXPECT_EQ(errno, EKEYEXPIRED); ++ ++ /* recv non-blocking -> -EKEYEXPIRED */ ++ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_DONTWAIT), -1); ++ EXPECT_EQ(errno, EKEYEXPIRED); ++ ++ /* update RX key */ ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); ++ ++ /* 
recv after rekey */ ++ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); ++ EXPECT_EQ(memcmp(buf, test_str_2, send_len), 0); ++ } ++ ++ TEST_F(tls, rekey_fail) ++ { ++ char const *test_str_1 = "test_message_before_rekey"; ++ char const *test_str_2 = "test_message_after_rekey"; ++ struct tls_crypto_info_keys tls12; ++ int send_len; ++ char buf[100]; ++ ++ /* initial send/recv */ ++ send_len = strlen(test_str_1) + 1; ++ EXPECT_EQ(send(self->fd, test_str_1, send_len, 0), send_len); ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); ++ EXPECT_EQ(memcmp(buf, test_str_1, send_len), 0); ++ ++ /* update TX key */ ++ tls_send_keyupdate(_metadata, self->fd); ++ ++ if (variant->tls_version != TLS_1_3_VERSION) { ++ /* just check that rekey is not supported and return */ ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), -1); ++ EXPECT_EQ(errno, EBUSY); ++ return; ++ } ++ ++ /* successful update */ ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ /* invalid update: change of version */ ++ tls_crypto_info_init(TLS_1_2_VERSION, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), -1); ++ EXPECT_EQ(errno, EINVAL); ++ ++ /* invalid update (RX socket): change of version */ ++ tls_crypto_info_init(TLS_1_2_VERSION, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), -1); ++ EXPECT_EQ(errno, EINVAL); ++ ++ /* invalid update: change of cipher */ ++ if (variant->cipher_type == TLS_CIPHER_AES_GCM_256) ++ tls_crypto_info_init(variant->tls_version, TLS_CIPHER_CHACHA20_POLY1305, &tls12, 1); ++ else ++ tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_256, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), -1); ++ EXPECT_EQ(errno, EINVAL); ++ ++ /* send after rekey, the invalid updates shouldn't have an effect */ ++ send_len = strlen(test_str_2) + 1; ++ EXPECT_EQ(send(self->fd, test_str_2, send_len, 0), send_len); ++ ++ /* can't receive the KeyUpdate without a control message */ ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); ++ ++ /* get KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, 0); ++ ++ /* recv blocking -> -EKEYEXPIRED */ ++ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), 0), -1); ++ EXPECT_EQ(errno, EKEYEXPIRED); ++ ++ /* recv non-blocking -> -EKEYEXPIRED */ ++ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_DONTWAIT), -1); ++ EXPECT_EQ(errno, EKEYEXPIRED); ++ ++ /* update RX key */ ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); ++ ++ /* recv after rekey */ ++ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1); ++ EXPECT_EQ(memcmp(buf, test_str_2, send_len), 0); ++ } ++ ++ TEST_F(tls, rekey_peek) ++ { ++ char const *test_str_1 = "test_message_before_rekey"; ++ struct tls_crypto_info_keys tls12; ++ int send_len; ++ char buf[100]; ++ ++ if (variant->tls_version != TLS_1_3_VERSION) ++ return; ++ ++ send_len = strlen(test_str_1) + 1; ++ EXPECT_EQ(send(self->fd, test_str_1, send_len, 0), send_len); ++ ++ /* update TX key */ ++ tls_send_keyupdate(_metadata, self->fd); ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ 
EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_PEEK), send_len); ++ EXPECT_EQ(memcmp(buf, test_str_1, send_len), 0); ++ ++ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len); ++ EXPECT_EQ(memcmp(buf, test_str_1, send_len), 0); ++ ++ /* can't receive the KeyUpdate without a control message */ ++ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_PEEK), -1); ++ ++ /* peek KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, MSG_PEEK); ++ ++ /* get KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, 0); ++ ++ /* update RX key */ ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); ++ } ++ ++ TEST_F(tls, splice_rekey) ++ { ++ int send_len = TLS_PAYLOAD_MAX_LEN / 2; ++ char mem_send[TLS_PAYLOAD_MAX_LEN]; ++ char mem_recv[TLS_PAYLOAD_MAX_LEN]; ++ struct tls_crypto_info_keys tls12; ++ int p[2]; ++ ++ if (variant->tls_version != TLS_1_3_VERSION) ++ return; ++ ++ memrnd(mem_send, sizeof(mem_send)); ++ ++ ASSERT_GE(pipe(p), 0); ++ EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len); ++ ++ /* update TX key */ ++ tls_send_keyupdate(_metadata, self->fd); ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len); ++ ++ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, TLS_PAYLOAD_MAX_LEN, 0), send_len); ++ EXPECT_EQ(read(p[0], mem_recv, send_len), send_len); ++ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); ++ ++ /* can't splice the KeyUpdate */ ++ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, TLS_PAYLOAD_MAX_LEN, 0), -1); ++ EXPECT_EQ(errno, EINVAL); ++ ++ /* peek KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, MSG_PEEK); ++ ++ /* get KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, 0); ++ ++ /* can't splice before updating the key */ ++ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, TLS_PAYLOAD_MAX_LEN, 0), -1); ++ EXPECT_EQ(errno, EKEYEXPIRED); ++ ++ /* update RX key */ ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); ++ ++ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, TLS_PAYLOAD_MAX_LEN, 0), send_len); ++ EXPECT_EQ(read(p[0], mem_recv, send_len), send_len); ++ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); ++ } ++ ++ TEST_F(tls, rekey_peek_splice) ++ { ++ char const *test_str_1 = "test_message_before_rekey"; ++ struct tls_crypto_info_keys tls12; ++ int send_len; ++ char buf[100]; ++ char mem_recv[TLS_PAYLOAD_MAX_LEN]; ++ int p[2]; ++ ++ if (variant->tls_version != TLS_1_3_VERSION) ++ return; ++ ++ ASSERT_GE(pipe(p), 0); ++ ++ send_len = strlen(test_str_1) + 1; ++ EXPECT_EQ(send(self->fd, test_str_1, send_len, 0), send_len); ++ ++ /* update TX key */ ++ tls_send_keyupdate(_metadata, self->fd); ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_PEEK), send_len); ++ EXPECT_EQ(memcmp(buf, test_str_1, send_len), 0); ++ ++ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, TLS_PAYLOAD_MAX_LEN, 0), send_len); ++ EXPECT_EQ(read(p[0], mem_recv, send_len), send_len); ++ EXPECT_EQ(memcmp(mem_recv, test_str_1, send_len), 0); ++ } ++ ++ TEST_F(tls, rekey_getsockopt) ++ { ++ struct tls_crypto_info_keys tls12; ++ struct tls_crypto_info_keys tls12_get; ++ socklen_t len; ++ ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 0); ++ ++ len = tls12.len; ++ EXPECT_EQ(getsockopt(self->fd, SOL_TLS, 
TLS_TX, &tls12_get, &len), 0); ++ EXPECT_EQ(len, tls12.len); ++ EXPECT_EQ(memcmp(&tls12_get, &tls12, tls12.len), 0); ++ ++ len = tls12.len; ++ EXPECT_EQ(getsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12_get, &len), 0); ++ EXPECT_EQ(len, tls12.len); ++ EXPECT_EQ(memcmp(&tls12_get, &tls12, tls12.len), 0); ++ ++ if (variant->tls_version != TLS_1_3_VERSION) ++ return; ++ ++ tls_send_keyupdate(_metadata, self->fd); ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ tls_recv_keyupdate(_metadata, self->cfd, 0); ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); ++ ++ len = tls12.len; ++ EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &tls12_get, &len), 0); ++ EXPECT_EQ(len, tls12.len); ++ EXPECT_EQ(memcmp(&tls12_get, &tls12, tls12.len), 0); ++ ++ len = tls12.len; ++ EXPECT_EQ(getsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12_get, &len), 0); ++ EXPECT_EQ(len, tls12.len); ++ EXPECT_EQ(memcmp(&tls12_get, &tls12, tls12.len), 0); ++ } ++ ++ TEST_F(tls, rekey_poll_pending) ++ { ++ char const *test_str = "test_message_after_rekey"; ++ struct tls_crypto_info_keys tls12; ++ struct pollfd pfd = { }; ++ int send_len; ++ int ret; ++ ++ if (variant->tls_version != TLS_1_3_VERSION) ++ return; ++ ++ /* update TX key */ ++ tls_send_keyupdate(_metadata, self->fd); ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ /* get KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, 0); ++ ++ /* send immediately after rekey */ ++ send_len = strlen(test_str) + 1; ++ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); ++ ++ /* key hasn't been updated, expect cfd to be non-readable */ ++ pfd.fd = self->cfd; ++ pfd.events = POLLIN; ++ EXPECT_EQ(poll(&pfd, 1, 0), 0); ++ ++ ret = fork(); ++ ASSERT_GE(ret, 0); ++ ++ if (ret) { ++ int pid2, status; ++ ++ /* wait before installing the new key */ ++ sleep(1); ++ ++ /* update RX key while poll() is sleeping */ ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); ++ ++ pid2 = wait(&status); ++ EXPECT_EQ(pid2, ret); ++ EXPECT_EQ(status, 0); ++ } else { ++ pfd.fd = self->cfd; ++ pfd.events = POLLIN; ++ EXPECT_EQ(poll(&pfd, 1, 5000), 1); ++ ++ exit(!__test_passed(_metadata)); ++ } ++ } ++ ++ TEST_F(tls, rekey_poll_delay) ++ { ++ char const *test_str = "test_message_after_rekey"; ++ struct tls_crypto_info_keys tls12; ++ struct pollfd pfd = { }; ++ int send_len; ++ int ret; ++ ++ if (variant->tls_version != TLS_1_3_VERSION) ++ return; ++ ++ /* update TX key */ ++ tls_send_keyupdate(_metadata, self->fd); ++ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12, 1); ++ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); ++ ++ /* get KeyUpdate */ ++ tls_recv_keyupdate(_metadata, self->cfd, 0); ++ ++ ret = fork(); ++ ASSERT_GE(ret, 0); ++ ++ if (ret) { ++ int pid2, status; ++ ++ /* wait before installing the new key */ ++ sleep(1); ++ ++ /* update RX key while poll() is sleeping */ ++ EXPECT_EQ(setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); ++ ++ sleep(1); ++ send_len = strlen(test_str) + 1; ++ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); ++ ++ pid2 = wait(&status); ++ EXPECT_EQ(pid2, ret); ++ EXPECT_EQ(status, 0); ++ } else { ++ pfd.fd = self->cfd; ++ pfd.events = POLLIN; ++ EXPECT_EQ(poll(&pfd, 1, 5000), 1); ++ exit(!__test_passed(_metadata)); ++ } ++ } ++ 
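++ /* Pre-encrypted records for the zero-length tests below. The fixture installs only the TLS_RX key on cfd; the cipher_data blobs are written to fd unmodified and decrypted by the kTLS receive path on cfd. */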
++ struct raw_rec { ++ unsigned int plain_len; ++ unsigned char plain_data[100]; ++ unsigned int cipher_len; ++ unsigned char cipher_data[128]; ++ }; ++ ++ /* TLS 1.2, AES_CCM, data, seqno:0, plaintext: 'Hello world' */ ++ static const struct raw_rec id0_data_l11 = { ++ .plain_len = 11, ++ .plain_data = { ++ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, ++ 0x72, 0x6c, 0x64, ++ }, ++ .cipher_len = 40, ++ .cipher_data = { ++ 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xa2, 0x33, ++ 0xde, 0x8d, 0x94, 0xf0, 0x29, 0x6c, 0xb1, 0xaf, ++ 0x6a, 0x75, 0xb2, 0x93, 0xad, 0x45, 0xd5, 0xfd, ++ 0x03, 0x51, 0x57, 0x8f, 0xf9, 0xcc, 0x3b, 0x42, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, ctrl, seqno:0, plaintext: '' */ ++ static const struct raw_rec id0_ctrl_l0 = { ++ .plain_len = 0, ++ .plain_data = { ++ }, ++ .cipher_len = 29, ++ .cipher_data = { ++ 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x38, 0x7b, ++ 0xa6, 0x1c, 0xdd, 0xa7, 0x19, 0x33, 0xab, 0xae, ++ 0x88, 0xe1, 0xd2, 0x08, 0x4f, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, data, seqno:0, plaintext: '' */ ++ static const struct raw_rec id0_data_l0 = { ++ .plain_len = 0, ++ .plain_data = { ++ }, ++ .cipher_len = 29, ++ .cipher_data = { ++ 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x37, 0x90, ++ 0x70, 0x45, 0x89, 0xfb, 0x5c, 0xc7, 0x89, 0x03, ++ 0x68, 0x80, 0xd3, 0xd8, 0xcc, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, data, seqno:1, plaintext: 'Hello world' */ ++ static const struct raw_rec id1_data_l11 = { ++ .plain_len = 11, ++ .plain_data = { ++ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, ++ 0x72, 0x6c, 0x64, ++ }, ++ .cipher_len = 40, ++ .cipher_data = { ++ 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x1a, 0x9c, ++ 0xd0, 0xa8, 0x9a, 0xd6, 0x69, 0xd6, 0x1a, 0xe3, ++ 0xb5, 0x1f, 0x0d, 0x2c, 0xe2, 0x97, 0x46, 0xff, ++ 0x2b, 0xcc, 0x5a, 0xc4, 0xa3, 0xb9, 0xef, 0xba, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, ctrl, seqno:1, plaintext: '' */ ++ static const struct raw_rec id1_ctrl_l0 = { ++ .plain_len = 0, ++ .plain_data = { ++ }, ++ .cipher_len = 29, ++ .cipher_data = { ++ 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0xf0, 0xfe, ++ 0xee, 0xd9, 0xe2, 0x5d, 0xc7, 0x11, 0x4c, 0xe6, ++ 0xb4, 0x7e, 0xef, 0x40, 0x2b, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, data, seqno:1, plaintext: '' */ ++ static const struct raw_rec id1_data_l0 = { ++ .plain_len = 0, ++ .plain_data = { ++ }, ++ .cipher_len = 29, ++ .cipher_data = { ++ 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0xfc, 0x86, ++ 0xc8, 0xf0, 0x55, 0xf9, 0x47, 0x3f, 0x74, 0xdc, ++ 0xc9, 0xbf, 0xfe, 0x5b, 0xb1, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: 'Hello world' */ ++ static const struct raw_rec id2_ctrl_l11 = { ++ .plain_len = 11, ++ .plain_data = { ++ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, ++ 0x72, 0x6c, 0x64, ++ }, ++ .cipher_len = 40, ++ .cipher_data = { ++ 0x16, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19, ++ 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87, ++ 0x2a, 0x04, 0x11, 0x3d, 0xf8, 0x64, 0x5f, 0x36, ++ 0x8b, 0xa8, 0xee, 0x4c, 0x6d, 0x62, 0xa5, 0x00, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, data, seqno:2, plaintext: 'Hello world' */ ++ static const struct raw_rec id2_data_l11 = { ++ .plain_len = 11, ++ .plain_data = { ++ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, ++ 0x72, 
0x6c, 0x64, ++ }, ++ .cipher_len = 40, ++ .cipher_data = { ++ 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19, ++ 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87, ++ 0x8e, 0xa1, 0xd0, 0xcd, 0x33, 0xb5, 0x86, 0x2b, ++ 0x17, 0xf1, 0x52, 0x2a, 0x55, 0x62, 0x65, 0x11, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: '' */ ++ static const struct raw_rec id2_ctrl_l0 = { ++ .plain_len = 0, ++ .plain_data = { ++ }, ++ .cipher_len = 29, ++ .cipher_data = { ++ 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x5c, 0x0e, ++ 0x41, 0xdd, 0xba, 0xd3, 0xcc, 0xcf, 0x6d, 0xd9, ++ 0x06, 0xdb, 0x79, 0xe5, 0x5d, ++ }, ++ }; ++ ++ /* TLS 1.2, AES_CCM, data, seqno:2, plaintext: '' */ ++ static const struct raw_rec id2_data_l0 = { ++ .plain_len = 0, ++ .plain_data = { ++ }, ++ .cipher_len = 29, ++ .cipher_data = { ++ 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x02, 0xc3, 0xca, 0x26, ++ 0x22, 0xe4, 0x25, 0xfb, 0x5f, 0x6d, 0xbf, 0x83, ++ 0x30, 0x48, 0x69, 0x1a, 0x47, ++ }, ++ }; ++ ++ FIXTURE(zero_len) ++ { ++ int fd, cfd; ++ bool notls; ++ }; ++ ++ FIXTURE_VARIANT(zero_len) ++ { ++ const struct raw_rec *recs[4]; ++ ssize_t recv_ret[4]; ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, data_data_data) ++ { ++ .recs = { &id0_data_l11, &id1_data_l11, &id2_data_l11, }, ++ .recv_ret = { 33, -EAGAIN, }, ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, data_0ctrl_data) ++ { ++ .recs = { &id0_data_l11, &id1_ctrl_l0, &id2_data_l11, }, ++ .recv_ret = { 11, 0, 11, -EAGAIN, }, ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0data) ++ { ++ .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l0, }, ++ .recv_ret = { -EAGAIN, }, ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, 0data_0data_ctrl) ++ { ++ .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l11, }, ++ .recv_ret = { 0, 11, -EAGAIN, }, ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0ctrl) ++ { ++ .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l0, }, ++ .recv_ret = { 0, 0, -EAGAIN, }, ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, 0ctrl_0ctrl_0ctrl) ++ { ++ .recs = { &id0_ctrl_l0, &id1_ctrl_l0, &id2_ctrl_l0, }, ++ .recv_ret = { 0, 0, 0, -EAGAIN, }, ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, 0data_0data_data) ++ { ++ .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l11, }, ++ .recv_ret = { 11, -EAGAIN, }, ++ }; ++ ++ FIXTURE_VARIANT_ADD(zero_len, data_0data_0data) ++ { ++ .recs = { &id0_data_l11, &id1_data_l0, &id2_data_l0, }, ++ .recv_ret = { 11, -EAGAIN, }, ++ }; ++ ++ FIXTURE_SETUP(zero_len) ++ { ++ struct tls_crypto_info_keys tls12; ++ int ret; ++ ++ tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128, ++ &tls12, 0); ++ ++ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); ++ if (self->notls) ++ return; ++ ++ /* Don't install keys on fd, we'll send raw records */ ++ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len); ++ ASSERT_EQ(ret, 0); ++ } ++ ++ FIXTURE_TEARDOWN(zero_len) ++ { ++ close(self->fd); ++ close(self->cfd); ++ } ++ ++ TEST_F(zero_len, test) ++ { ++ const struct raw_rec *const *rec; ++ unsigned char buf[128]; ++ int rec_off; ++ int i; ++ ++ for (i = 0; i < 4 && variant->recs[i]; i++) ++ EXPECT_EQ(send(self->fd, variant->recs[i]->cipher_data, ++ variant->recs[i]->cipher_len, 0), ++ variant->recs[i]->cipher_len); ++ ++ rec = &variant->recs[0]; ++ rec_off = 0; ++ for (i = 0; i < 4; i++) { ++ int j, ret; ++ ++ ret = variant->recv_ret[i] >= 0 ? 
variant->recv_ret[i] : -1; ++ EXPECT_EQ(__tls_recv_cmsg(_metadata, self->cfd, NULL, ++ buf, sizeof(buf), MSG_DONTWAIT), ret); ++ if (ret == -1) ++ EXPECT_EQ(errno, -variant->recv_ret[i]); ++ if (variant->recv_ret[i] == -EAGAIN) ++ break; ++ ++ for (j = 0; j < ret; j++) { ++ while (rec_off == (*rec)->plain_len) { ++ rec++; ++ rec_off = 0; ++ } ++ EXPECT_EQ(buf[j], (*rec)->plain_data[rec_off]); ++ rec_off++; ++ } ++ } ++ }; ++ +++>>>>>>> a61a3e961baf (selftests: tls: add tests for zero-length records) + FIXTURE(tls_err) + { + int fd, cfd; +* Unmerged path tools/testing/selftests/net/tls.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/bec324f3.failed b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/bec324f3.failed new file mode 100644 index 0000000000000..adec48b9dcd57 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/bec324f3.failed @@ -0,0 +1,112 @@ +ceph: fix race condition where r_parent becomes stale before sending message + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.49.1.el9_6 +Rebuild_CHGLOG: - ceph: fix client race condition where r_parent becomes stale before sending message (Alex Markuze) [RHEL-114962] +Rebuild_FUZZ: 95.60% +commit-author Alex Markuze +commit bec324f33d1ed346394b2eee25bf6dbf3511f727 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/bec324f3.failed + +When the parent directory's i_rwsem is not locked, req->r_parent may become +stale due to concurrent operations (e.g. rename) between dentry lookup and +message creation. Validate that r_parent matches the encoded parent inode +and update to the correct inode if a mismatch is detected. + +[ idryomov: folded a follow-up fix from Alex to drop extra reference + from ceph_get_reply_dir() in ceph_fill_trace(): + + ceph_get_reply_dir() may return a different, referenced inode when + r_parent is stale and the parent directory lock is not held. + ceph_fill_trace() used that inode but failed to drop the reference + when it differed from req->r_parent, leaking an inode reference. + + Keep the directory inode in a local variable and iput() it at + function end if it does not match req->r_parent. 
] + + Cc: stable@vger.kernel.org + Signed-off-by: Alex Markuze + Reviewed-by: Viacheslav Dubeyko + Signed-off-by: Ilya Dryomov +(cherry picked from commit bec324f33d1ed346394b2eee25bf6dbf3511f727) + Signed-off-by: Jonathan Maple + +# Conflicts: +# fs/ceph/inode.c +diff --cc fs/ceph/inode.c +index 34cfeb0fba9e,f67025465de0..000000000000 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@@ -1488,14 -1567,16 +1534,20 @@@ int ceph_fill_trace(struct super_block + struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; + struct inode *in = NULL; + struct ceph_vino tvino, dvino; +++<<<<<<< HEAD + + struct ceph_fs_client *fsc = ceph_sb_to_client(sb); +++======= ++ struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); ++ struct ceph_client *cl = fsc->client; ++ struct inode *parent_dir = NULL; +++>>>>>>> bec324f33d1e (ceph: fix race condition where r_parent becomes stale before sending message) + int err = 0; + + - doutc(cl, "%p is_dentry %d is_target %d\n", req, + - rinfo->head->is_dentry, rinfo->head->is_target); + + dout("fill_trace %p is_dentry %d is_target %d\n", req, + + rinfo->head->is_dentry, rinfo->head->is_target); + + if (!rinfo->head->is_target && !rinfo->head->is_dentry) { + - doutc(cl, "reply is empty!\n"); + + dout("fill_trace reply is empty!\n"); + if (rinfo->head->result == 0 && req->r_parent) + ceph_invalidate_dir_request(req); + return 0; +@@@ -1557,11 -1645,11 +1616,11 @@@ retry_lookup + + if (!dn) { + dn = d_alloc(parent, &dname); + - doutc(cl, "d_alloc %p '%.*s' = %p\n", parent, + - dname.len, dname.name, dn); + + dout("d_alloc %p '%.*s' = %p\n", parent, + + dname.len, dname.name, dn); + if (!dn) { + dput(parent); +- ceph_fname_free_buffer(dir, &oname); ++ ceph_fname_free_buffer(parent_dir, &oname); + err = -ENOMEM; + goto done; + } +@@@ -1574,9 -1662,9 +1633,15 @@@ + } else if (d_really_is_positive(dn) && + (ceph_ino(d_inode(dn)) != tvino.ino || + ceph_snap(d_inode(dn)) != tvino.snap)) { +++<<<<<<< HEAD + + dout(" dn %p points to wrong inode %p\n", + + dn, d_inode(dn)); + + ceph_dir_clear_ordered(dir); +++======= ++ doutc(cl, " dn %p points to wrong inode %p\n", ++ dn, d_inode(dn)); ++ ceph_dir_clear_ordered(parent_dir); +++>>>>>>> bec324f33d1e (ceph: fix race condition where r_parent becomes stale before sending message) + d_delete(dn); + dput(dn); + goto retry_lookup; +@@@ -1763,7 -1848,10 +1828,14 @@@ + &dvino, ptvino); + } + done: +++<<<<<<< HEAD + + dout("fill_trace done err=%d\n", err); +++======= ++ /* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */ ++ if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent)) ++ iput(parent_dir); ++ doutc(cl, "done err=%d\n", err); +++>>>>>>> bec324f33d1e (ceph: fix race condition where r_parent becomes stale before sending message) + return err; + } + +* Unmerged path fs/ceph/inode.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/f0030752.failed b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/f0030752.failed new file mode 100644 index 0000000000000..ded2a6676556d --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/f0030752.failed @@ -0,0 +1,539 @@ +ice: Implement PTP support for E830 devices + +jira LE-4311 +Rebuild_History Non-Buildable kernel-5.14.0-570.49.1.el9_6 +commit-author Michal Michalik +commit f003075227864344c14f53302c28acd0174d9225 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. 
Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/f0030752.failed + +Add specific functions and definitions for E830 devices to enable +PTP support. + +E830 devices support direct write to GLTSYN_ registers without shadow +registers and 64-bit read of PHC time. + +Enable PTM for E830 devices, which is required for cross timestamp, +and add a dependency on PCIE_PTM for ICE_HWTS. + +Check X86_FEATURE_ART for E830 as it may not be present in the CPU. + + Cc: Anna-Maria Behnsen + Cc: Frederic Weisbecker + Cc: Thomas Gleixner + Reviewed-by: Przemek Kitszel +Co-developed-by: Jacob Keller + Signed-off-by: Jacob Keller +Co-developed-by: Milena Olech + Signed-off-by: Milena Olech +Co-developed-by: Paul Greenwalt + Signed-off-by: Paul Greenwalt + Signed-off-by: Michal Michalik +Co-developed-by: Karol Kolacinski + Signed-off-by: Karol Kolacinski + Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) + Signed-off-by: Tony Nguyen +(cherry picked from commit f003075227864344c14f53302c28acd0174d9225) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/net/ethernet/intel/ice/ice_ptp_hw.h +diff --cc drivers/net/ethernet/intel/ice/ice_ptp_hw.h +index f2c2a46a49a8,8442d1d60351..000000000000 +--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h ++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +@@@ -326,8 -324,7 +326,12 @@@ extern const struct ice_vernier_info_e8 + */ + #define ICE_E810_PLL_FREQ 812500000 + #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +++<<<<<<< HEAD + +#define ICE_E810_OUT_PROP_DELAY_NS 1 + +#define ICE_E825C_OUT_PROP_DELAY_NS 11 +++======= ++ #define ICE_E810_E830_SYNC_DELAY 0 +++>>>>>>> f00307522786 (ice: Implement PTP support for E830 devices) + + /* Device agnostic functions */ + u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); +diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig +index 759ff42a087d..1837db6aad73 100644 +--- a/drivers/net/ethernet/intel/Kconfig ++++ b/drivers/net/ethernet/intel/Kconfig +@@ -351,7 +351,7 @@ config ICE_SWITCHDEV + config ICE_HWTS + bool "Support HW cross-timestamp on platforms with PTM support" + default y +- depends on ICE && X86 ++ depends on ICE && X86 && PCIE_PTM + help + Say Y to enable hardware supported cross-timestamping on platforms + with PCIe PTM support. 
The cross-timestamp is available through +diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +index 8d31bfe28cc8..b692be1cf7bf 100644 +--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h ++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +@@ -533,10 +533,22 @@ + #define PFPM_WUS_MAG_M BIT(1) + #define PFPM_WUS_MNG_M BIT(3) + #define PFPM_WUS_FW_RST_WK_M BIT(31) ++#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020 ++#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000 + #define E830_PRTMAC_CL01_PS_QNT 0x001E32A0 + #define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0) + #define E830_PRTMAC_CL01_QNT_THR 0x001E3320 + #define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) ++#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32)) ++#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32)) ++#define E830_GLPTM_ART_CTL 0x00088B50 ++#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0) ++#define E830_GLPTM_ART_TIME_H 0x00088B54 ++#define E830_GLPTM_ART_TIME_L 0x00088B58 ++#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4)) ++#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4)) ++#define E830_PFPTM_SEM 0x00088B00 ++#define E830_PFPTM_SEM_BUSY_M BIT(0) + #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) + #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) + #define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 7aa9dbc9e044..a53dbb029c76 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -4043,8 +4043,7 @@ static void ice_set_pf_caps(struct ice_pf *pf) + } + + clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); +- if (func_caps->common_cap.ieee_1588 && +- !(pf->hw.mac_type == ICE_MAC_E830)) ++ if (func_caps->common_cap.ieee_1588) + set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); + + pf->max_pf_txqs = func_caps->common_cap.num_txq; +@@ -5031,6 +5030,12 @@ static int ice_init(struct ice_pf *pf) + if (err) + return err; + ++ if (pf->hw.mac_type == ICE_MAC_E830) { ++ err = pci_enable_ptm(pf->pdev, NULL); ++ if (err) ++ dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); ++ } ++ + err = ice_alloc_vsis(pf); + if (err) + goto err_alloc_vsis; +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c +index 2c7e8043f128..290b2839c8be 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ptp.c ++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c +@@ -310,6 +310,15 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) + /* Read the system timestamp pre PHC read */ + ptp_read_system_prets(sts); + ++ if (hw->mac_type == ICE_MAC_E830) { ++ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx)); ++ ++ /* Read the system timestamp post PHC read */ ++ ptp_read_system_postts(sts); ++ ++ return clk_time; ++ } ++ + lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); + + /* Read the system timestamp post PHC read */ +@@ -1305,6 +1314,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) + + switch (hw->mac_type) { + case ICE_MAC_E810: ++ case ICE_MAC_E830: + err = 0; + break; + case ICE_MAC_GENERIC: +@@ -1351,6 +1361,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) + + switch (hw->mac_type) { + case ICE_MAC_E810: ++ case ICE_MAC_E830: + err = 0; + break; + case ICE_MAC_GENERIC: +@@ -1418,7 +1429,8 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup) + + switch (hw->mac_type) { + case ICE_MAC_E810: +- /* Do not 
reconfigure E810 PHY */ ++ case ICE_MAC_E830: ++ /* Do not reconfigure E810 or E830 PHY */ + return; + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: +@@ -1451,6 +1463,7 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) + + switch (hw->mac_type) { + case ICE_MAC_E810: ++ case ICE_MAC_E830: + return 0; + case ICE_MAC_GENERIC: { + int quad; +@@ -2192,6 +2205,21 @@ static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = { + .dev_time_h[1] = GLTSYN_HHTIME_H(1), + }; + ++#ifdef CONFIG_ICE_HWTS ++static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = { ++ .lock_reg = E830_PFPTM_SEM, ++ .lock_busy = E830_PFPTM_SEM_BUSY_M, ++ .ctl_reg = E830_GLPTM_ART_CTL, ++ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M, ++ .art_time_l = E830_GLPTM_ART_TIME_L, ++ .art_time_h = E830_GLPTM_ART_TIME_H, ++ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0), ++ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0), ++ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1), ++ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1), ++}; ++ ++#endif /* CONFIG_ICE_HWTS */ + /** + * struct ice_crosststamp_ctx - Device cross timestamp context + * @snapshot: snapshot of system clocks for historic interpolation +@@ -2313,6 +2341,11 @@ static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, + case ICE_MAC_GENERIC_3K_E825: + ctx.cfg = &ice_crosststamp_cfg_e82x; + break; ++#ifdef CONFIG_ICE_HWTS ++ case ICE_MAC_E830: ++ ctx.cfg = &ice_crosststamp_cfg_e830; ++ break; ++#endif /* CONFIG_ICE_HWTS */ + default: + return -EOPNOTSUPP; + } +@@ -2648,6 +2681,28 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf) + } + } + ++/** ++ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support ++ * @pf: Board private structure ++ * ++ * Assign functions to the PTP capabilities structure for E830 devices. ++ * Functions which operate across all device families should be set directly ++ * in ice_ptp_set_caps. Only add functions here which are distinct for E830 ++ * devices. ++ */ ++static void ice_ptp_set_funcs_e830(struct ice_pf *pf) ++{ ++#ifdef CONFIG_ICE_HWTS ++ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART)) ++ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp; ++ ++#endif /* CONFIG_ICE_HWTS */ ++ /* Rest of the config is the same as base E810 */ ++ pf->ptp.ice_pin_desc = ice_pin_desc_e810; ++ pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); ++ ice_ptp_setup_pin_cfg(pf); ++} ++ + /** + * ice_ptp_set_caps - Set PTP capabilities + * @pf: Board private structure +@@ -2674,6 +2729,9 @@ static void ice_ptp_set_caps(struct ice_pf *pf) + case ICE_MAC_E810: + ice_ptp_set_funcs_e810(pf); + return; ++ case ICE_MAC_E830: ++ ice_ptp_set_funcs_e830(pf); ++ return; + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + ice_ptp_set_funcs_e82x(pf); +@@ -2834,6 +2892,16 @@ irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) + + set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); + return IRQ_WAKE_THREAD; ++ case ICE_MAC_E830: ++ /* E830 can read timestamps in the top half using rd32() */ ++ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { ++ /* Process outstanding Tx timestamps. If there ++ * is more work, re-arm the interrupt to trigger again. 
++ */ ++ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); ++ ice_flush(hw); ++ } ++ return IRQ_HANDLED; + default: + return IRQ_HANDLED; + } +@@ -3219,6 +3287,7 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) + + switch (hw->mac_type) { + case ICE_MAC_E810: ++ case ICE_MAC_E830: + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num); + case ICE_MAC_GENERIC: +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +index 2b0d2b7df785..90eb8ed6cee7 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c ++++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +@@ -829,6 +829,7 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, + */ + switch (hw->mac_type) { + case ICE_MAC_E810: ++ case ICE_MAC_E830: + return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810; + default: + break; +@@ -895,6 +896,17 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) + ice_flush(hw); + } + ++/** ++ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay ++ * @hw: pointer to HW struct ++ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds ++ */ ++static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay) ++{ ++ wr32(hw, GLTSYN_SYNC_DLAY, delay); ++ ice_flush(hw); ++} ++ + /* 56G PHY device functions + * + * The following functions operate on devices with the ETH 56G PHY. +@@ -5043,8 +5055,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw) + u8 tmr_idx; + int err; + +- /* Ensure synchronization delay is zero */ +- wr32(hw, GLTSYN_SYNC_DLAY, 0); ++ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); + + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), +@@ -5445,6 +5456,128 @@ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) + init_waitqueue_head(&ptp->phy.e810.atqbal_wq); + } + ++/* E830 functions ++ * ++ * The following functions operate on the E830 series devices. ++ * ++ */ ++ ++/** ++ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization ++ * @hw: pointer to HW struct ++ * ++ * Perform E830-specific PTP hardware clock initialization steps. ++ */ ++static void ice_ptp_init_phc_e830(const struct ice_hw *hw) ++{ ++ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); ++} ++ ++/** ++ * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change ++ * @hw: pointer to HW struct ++ * @incval: The new 40bit increment value to prepare ++ * ++ * Prepare the PHY port for a new increment value by programming the PHC ++ * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is ++ * completed by FW automatically. ++ */ ++static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw, ++ u64 incval) ++{ ++ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; ++ ++ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval)); ++ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval)); ++} ++ ++/** ++ * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time ++ * @hw: Board private structure ++ * @time: Time to initialize the PHY port clock to ++ * ++ * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting ++ * the initial clock time. The time will not actually be programmed until the ++ * driver issues an ICE_PTP_INIT_TIME command. ++ * ++ * The time value is the upper 32 bits of the PHY timer, usually in units of ++ * nominal nanoseconds. 
++ */ ++static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw, ++ u64 time) ++{ ++ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; ++ ++ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0); ++ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time)); ++ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time)); ++} ++ ++/** ++ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command ++ * @hw: pointer to HW struct ++ * @cmd: Command to be sent to the port ++ * ++ * Prepare the external PHYs connected to this device for a timer sync ++ * command. ++ * ++ * Return: 0 on success, negative error code when PHY write failed ++ */ ++static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) ++{ ++ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); ++ ++ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val); ++} ++ ++/** ++ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY ++ * @hw: pointer to the HW struct ++ * @idx: the timestamp index to read ++ * @tstamp: on return, the 40bit timestamp value ++ * ++ * Read a 40bit timestamp value out of the timestamp block of the external PHY ++ * on the E830 device. ++ */ ++static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx, ++ u64 *tstamp) ++{ ++ u32 hi, lo; ++ ++ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx)); ++ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx)); ++ ++ /* For E830 devices, the timestamp is reported with the lower 32 bits ++ * in the low register, and the upper 8 bits in the high register. ++ */ ++ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) | ++ FIELD_PREP(PHY_EXT_40B_LOW_M, lo); ++} ++ ++/** ++ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register ++ * @hw: pointer to the HW struct ++ * @port: the PHY port to read ++ * @tstamp_ready: contents of the Tx memory status register ++ */ ++static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port, ++ u64 *tstamp_ready) ++{ ++ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H); ++ *tstamp_ready <<= 32; ++ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L); ++} ++ ++/** ++ * ice_ptp_init_phy_e830 - initialize PHY parameters ++ * @ptp: pointer to the PTP HW struct ++ */ ++static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp) ++{ ++ ptp->num_lports = 8; ++ ptp->ports_per_phy = 4; ++} ++ + /* Device agnostic functions + * + * The following functions implement shared behavior common to all devices, +@@ -5515,6 +5648,9 @@ void ice_ptp_init_hw(struct ice_hw *hw) + case ICE_MAC_E810: + ice_ptp_init_phy_e810(ptp); + break; ++ case ICE_MAC_E830: ++ ice_ptp_init_phy_e830(ptp); ++ break; + case ICE_MAC_GENERIC: + ice_ptp_init_phy_e82x(ptp); + break; +@@ -5612,6 +5748,8 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) + switch (hw->mac_type) { + case ICE_MAC_E810: + return ice_ptp_port_cmd_e810(hw, cmd); ++ case ICE_MAC_E830: ++ return ice_ptp_port_cmd_e830(hw, cmd); + default: + break; + } +@@ -5682,6 +5820,12 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + + /* Source timers */ ++ /* For E830 we don't need to use shadow registers, it's automatic */ ++ if (hw->mac_type == ICE_MAC_E830) { ++ ice_ptp_write_direct_phc_time_e830(hw, time); ++ return 0; ++ } ++ + wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time)); + wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time)); + wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0); +@@ -5730,6 +5874,12 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) + + tmr_idx = 
hw->func_caps.ts_func_info.tmr_index_owned; + ++ /* For E830 we don't need to use shadow registers, it's automatic */ ++ if (hw->mac_type == ICE_MAC_E830) { ++ ice_ptp_write_direct_incval_e830(hw, incval); ++ return 0; ++ } ++ + /* Shadow Adjust */ + wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); + wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); +@@ -5807,6 +5957,9 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) + case ICE_MAC_E810: + err = ice_ptp_prep_phy_adj_e810(hw, adj); + break; ++ case ICE_MAC_E830: ++ /* E830 sync PHYs automatically after setting GLTSYN_SHADJ */ ++ return 0; + case ICE_MAC_GENERIC: + err = ice_ptp_prep_phy_adj_e82x(hw, adj); + break; +@@ -5839,6 +5992,9 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) + switch (hw->mac_type) { + case ICE_MAC_E810: + return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); ++ case ICE_MAC_E830: ++ ice_read_phy_tstamp_e830(hw, idx, tstamp); ++ return 0; + case ICE_MAC_GENERIC: + return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp); + case ICE_MAC_GENERIC_3K_E825: +@@ -5961,6 +6117,9 @@ int ice_ptp_init_phc(struct ice_hw *hw) + switch (hw->mac_type) { + case ICE_MAC_E810: + return ice_ptp_init_phc_e810(hw); ++ case ICE_MAC_E830: ++ ice_ptp_init_phc_e830(hw); ++ return 0; + case ICE_MAC_GENERIC: + return ice_ptp_init_phc_e82x(hw); + case ICE_MAC_GENERIC_3K_E825: +@@ -5987,13 +6146,15 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) + case ICE_MAC_E810: + return ice_get_phy_tx_tstamp_ready_e810(hw, block, + tstamp_ready); ++ case ICE_MAC_E830: ++ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready); ++ return 0; + case ICE_MAC_GENERIC: + return ice_get_phy_tx_tstamp_ready_e82x(hw, block, + tstamp_ready); + case ICE_MAC_GENERIC_3K_E825: + return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, + tstamp_ready); +- break; + default: + return -EOPNOTSUPP; + } +* Unmerged path drivers/net/ethernet/intel/ice/ice_ptp_hw.h diff --git a/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/rebuild.details.txt b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/rebuild.details.txt new file mode 100644 index 0000000000000..88522fc34148f --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.49.1.el9_6/rebuild.details.txt @@ -0,0 +1,24 @@ +Rebuild_History BUILDABLE +Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50% +Number of commits in upstream range v5.14~1..kernel-mainline: 324124 +Number of commits in rpm: 33 +Number of commits matched with upstream: 31 (93.94%) +Number of commits in upstream but not in rpm: 324093 +Number of commits NOT found in upstream: 2 (6.06%) + +Rebuilding Kernel on Branch rocky9_6_rebuild_kernel-5.14.0-570.49.1.el9_6 for kernel-5.14.0-570.49.1.el9_6 +Clean Cherry Picks: 25 (80.65%) +Empty Cherry Picks: 6 (19.35%) +_______________________________ + +__EMPTY COMMITS__________________________ +690e47d1403e90b7f2366f03b52ed3304194c793 sched/rt: Fix race in push_rt_task +914639464b760a4ec659a46cc2de9a2fdc4eff5a ice: Add in/out PTP pin delays +f003075227864344c14f53302c28acd0174d9225 ice: Implement PTP support for E830 devices +15f519e9f883b316d86e2bb6b767a023aafd9d83 ceph: fix race condition validating r_parent before applying state +bec324f33d1ed346394b2eee25bf6dbf3511f727 ceph: fix race condition where r_parent becomes stale before sending message +a61a3e961baff65b0a49f862fe21ce304f279b24 selftests: tls: add tests for zero-length records + +__CHANGES NOT IN UPSTREAM________________ +Porting to Rocky Linux 9, debranding and Rocky 
branding' +Ensure aarch64 kernel is not compressed' diff --git a/drivers/base/core.c b/drivers/base/core.c index b2fe401abd4aa..b953b69e0fe6c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -2559,7 +2558,6 @@ static const char *dev_uevent_name(const struct kobject *kobj) static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct device *dev = kobj_to_dev(kobj); - struct device_driver *driver; int retval = 0; /* add device node properties if present */ @@ -2588,12 +2586,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); - /* Synchronize with module_remove_driver() */ - rcu_read_lock(); - driver = READ_ONCE(dev->driver); - if (driver) - add_uevent_var(env, "DRIVER=%s", driver->name); - rcu_read_unlock(); + if (dev->driver) + add_uevent_var(env, "DRIVER=%s", dev->driver->name); /* Add common DT information about the device */ of_device_uevent(dev, env); @@ -2663,8 +2657,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; + /* Synchronize with really_probe() */ + device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); + device_unlock(dev); if (retval) goto out; diff --git a/drivers/base/module.c b/drivers/base/module.c index 851cc5367c04c..46ad4d636731d 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -7,7 +7,6 @@ #include #include #include -#include #include "base.h" static char *make_driver_name(struct device_driver *drv) @@ -78,9 +77,6 @@ void module_remove_driver(struct device_driver *drv) if (!drv) return; - /* Synchronize with dev_uevent() */ - synchronize_rcu(); - sysfs_remove_link(&drv->p->kobj, "module"); if (drv->owner) diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 951614d84defe..1e8fa249a1c4a 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -275,15 +275,10 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, struct cppc_cpudata *cpu_data = policy->driver_data; unsigned int cpu = policy->cpu; struct cpufreq_freqs freqs; - u32 desired_perf; int ret = 0; - desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); - /* Return if it is exactly the same perf */ - if (desired_perf == cpu_data->perf_ctrls.desired_perf) - return ret; - - cpu_data->perf_ctrls.desired_perf = desired_perf; + cpu_data->perf_ctrls.desired_perf = + cppc_khz_to_perf(&cpu_data->perf_caps, target_freq); freqs.old = policy->cur; freqs.new = target_freq; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1b87b600197de..c7179886ed75f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -758,7 +758,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); tx_kick_pending: if (BNXT_TX_PTP_IS_SET(lflags)) { - txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; + txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0; atomic64_inc(&bp->ptp_cfg->stats.ts_err); if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) /* set SKB to err so PTP worker will clean up */ @@ -766,7 +766,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) } if (txr->kick_pending) bnxt_txr_db_kick(bp, 
txr, txr->tx_prod); - txr->tx_buf_ring[txr->tx_prod].skb = NULL; + txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL; dev_core_stats_tx_dropped_inc(dev); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig index ad80c0fa96a68..96709875fe4f2 100644 --- a/drivers/net/ethernet/cisco/enic/Kconfig +++ b/drivers/net/ethernet/cisco/enic/Kconfig @@ -6,5 +6,6 @@ config ENIC tristate "Cisco VIC Ethernet NIC Support" depends on PCI + select PAGE_POOL help This enables the support for the Cisco VIC Ethernet card. diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile index c3b6febfdbe44..a96b8332e6e2a 100644 --- a/drivers/net/ethernet/cisco/enic/Makefile +++ b/drivers/net/ethernet/cisco/enic/Makefile @@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ - enic_ethtool.o enic_api.o enic_clsf.o + enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h index 462c5435a206b..bfb3f14e89f5d 100644 --- a/drivers/net/ethernet/cisco/enic/cq_desc.h +++ b/drivers/net/ethernet/cisco/enic/cq_desc.h @@ -40,28 +40,7 @@ struct cq_desc { #define CQ_DESC_COMP_NDX_BITS 12 #define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) -static inline void cq_desc_dec(const struct cq_desc *desc_arg, - u8 *type, u8 *color, u16 *q_number, u16 *completed_index) -{ - const struct cq_desc *desc = desc_arg; - const u8 type_color = desc->type_color; - - *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; - - /* - * Make sure color bit is read from desc *before* other fields - * are read from desc. Hardware guarantees color bit is last - * bit (byte) written. Adding the rmb() prevents the compiler - * and/or CPU from reordering the reads which would potentially - * result in reading stale values. 
- */ - - rmb(); - - *type = type_color & CQ_DESC_TYPE_MASK; - *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; - *completed_index = le16_to_cpu(desc->completed_index) & - CQ_DESC_COMP_NDX_MASK; -} +#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1)) +#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1)) #endif /* _CQ_DESC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h index d25426470a293..50787cff29db0 100644 --- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h +++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h @@ -17,12 +17,22 @@ struct cq_enet_wq_desc { u8 type_color; }; -static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, - u8 *type, u8 *color, u16 *q_number, u16 *completed_index) -{ - cq_desc_dec((struct cq_desc *)desc, type, - color, q_number, completed_index); -} +/* + * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET + */ +#define VNIC_RQ_ALL (~0ULL) + +#define VNIC_RQ_CQ_ENTRY_SIZE_16 0 +#define VNIC_RQ_CQ_ENTRY_SIZE_32 1 +#define VNIC_RQ_CQ_ENTRY_SIZE_64 2 + +#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16) +#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32) +#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64) + +#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \ + VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \ + VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE) /* Completion queue descriptor: Ethernet receive queue, 16B */ struct cq_enet_rq_desc { @@ -36,6 +46,45 @@ struct cq_enet_rq_desc { u8 type_color; }; +/* Completion queue descriptor: Ethernet receive queue, 32B */ +struct cq_enet_rq_desc_32 { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 fetch_index_flags; + __le32 time_stamp; + __le16 time_stamp2; + __le16 pie_info; + __le32 pie_info2; + __le16 pie_info3; + u8 pie_info4; + u8 type_color; +}; + +/* Completion queue descriptor: Ethernet receive queue, 64B */ +struct cq_enet_rq_desc_64 { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 fetch_index_flags; + __le32 time_stamp; + __le16 time_stamp2; + __le16 pie_info; + __le32 pie_info2; + __le16 pie_info3; + u8 pie_info4; + u8 reserved[32]; + u8 type_color; +}; + #define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) #define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) #define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) @@ -88,85 +137,4 @@ struct cq_enet_rq_desc { #define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) #define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) -static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, - u8 *type, u8 *color, u16 *q_number, u16 *completed_index, - u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, - u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, - u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof, - u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, - u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, - u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) -{ - u16 completed_index_flags; - u16 q_number_rss_type_flags; - u16 bytes_written_flags; - - cq_desc_dec((struct cq_desc *)desc, type, - color, q_number, completed_index); - - completed_index_flags = le16_to_cpu(desc->completed_index_flags); - q_number_rss_type_flags = - 
le16_to_cpu(desc->q_number_rss_type_flags); - bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); - - *ingress_port = (completed_index_flags & - CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; - *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? - 1 : 0; - *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? - 1 : 0; - *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? - 1 : 0; - - *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & - CQ_ENET_RQ_DESC_RSS_TYPE_MASK); - *csum_not_calc = (q_number_rss_type_flags & - CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; - - *rss_hash = le32_to_cpu(desc->rss_hash); - - *bytes_written = bytes_written_flags & - CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; - *packet_error = (bytes_written_flags & - CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; - *vlan_stripped = (bytes_written_flags & - CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; - - /* - * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) - */ - *vlan_tci = le16_to_cpu(desc->vlan); - - if (*fcoe) { - *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & - CQ_ENET_RQ_DESC_FCOE_SOF_MASK); - *fcoe_fc_crc_ok = (desc->flags & - CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; - *fcoe_enc_error = (desc->flags & - CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; - *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> - CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & - CQ_ENET_RQ_DESC_FCOE_EOF_MASK); - *checksum = 0; - } else { - *fcoe_sof = 0; - *fcoe_fc_crc_ok = 0; - *fcoe_enc_error = 0; - *fcoe_eof = 0; - *checksum = le16_to_cpu(desc->checksum_fcoe); - } - - *tcp_udp_csum_ok = - (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; - *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; - *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; - *ipv4_csum_ok = - (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; - *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; - *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; - *ipv4_fragment = - (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; - *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 
1 : 0; -} - #endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 300ad05ee05bc..301b3f3114afa 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -17,21 +17,28 @@ #include "vnic_nic.h" #include "vnic_rss.h" #include +#include #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" #define ENIC_BARS_MAX 6 -#define ENIC_WQ_MAX 8 -#define ENIC_RQ_MAX 8 -#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) -#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) +#define ENIC_WQ_MAX 256 +#define ENIC_RQ_MAX 256 +#define ENIC_RQ_MIN_DEFAULT 8 #define ENIC_WQ_NAPI_BUDGET 256 #define ENIC_AIC_LARGE_PKT_DIFF 3 +enum ext_cq { + ENIC_RQ_CQ_ENTRY_SIZE_16, + ENIC_RQ_CQ_ENTRY_SIZE_32, + ENIC_RQ_CQ_ENTRY_SIZE_64, + ENIC_RQ_CQ_ENTRY_SIZE_MAX, +}; + struct enic_msix_entry { int requested; char devname[IFNAMSIZ + 8]; @@ -77,6 +84,10 @@ struct enic_rx_coal { #define ENIC_SET_INSTANCE (1 << 3) #define ENIC_SET_HOST (1 << 4) +#define MAX_TSO BIT(16) +#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS) +#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) + struct enic_port_profile { u32 set; u8 request; @@ -128,6 +139,53 @@ struct vxlan_offload { u8 flags; }; +struct enic_wq_stats { + u64 packets; /* pkts queued for Tx */ + u64 stopped; /* Tx ring almost full, queue stopped */ + u64 wake; /* Tx ring no longer full, queue woken up*/ + u64 tso; /* non-encap tso pkt */ + u64 encap_tso; /* encap tso pkt */ + u64 encap_csum; /* encap HW csum */ + u64 csum_partial; /* skb->ip_summed = CHECKSUM_PARTIAL */ + u64 csum_none; /* HW csum not required */ + u64 bytes; /* bytes queued for Tx */ + u64 add_vlan; /* HW adds vlan tag */ + u64 cq_work; /* Tx completions processed */ + u64 cq_bytes; /* Tx bytes processed */ + u64 null_pkt; /* skb length <= 0 */ + u64 skb_linear_fail; /* linearize failures */ + u64 desc_full_awake; /* TX ring full while queue awake */ +}; + +struct enic_rq_stats { + u64 packets; /* pkts received */ + u64 bytes; /* bytes received */ + u64 l4_rss_hash; /* hashed on l4 */ + u64 l3_rss_hash; /* hashed on l3 */ + u64 csum_unnecessary; /* HW verified csum */ + u64 csum_unnecessary_encap; /* HW verified csum on encap packet */ + u64 vlan_stripped; /* HW stripped vlan */ + u64 napi_complete; /* napi complete intr reenabled */ + u64 napi_repoll; /* napi poll again */ + u64 bad_fcs; /* bad pkts */ + u64 pkt_truncated; /* truncated pkts */ + u64 no_skb; /* out of skbs */ + u64 desc_skip; /* Rx pkt went into later buffer */ + u64 pp_alloc_fail; /* page pool alloc failure */ +}; + +struct enic_wq { + spinlock_t lock; /* spinlock for wq */ + struct vnic_wq vwq; + struct enic_wq_stats stats; +} ____cacheline_aligned; + +struct enic_rq { + struct vnic_rq vrq; + struct enic_rq_stats stats; + struct page_pool *pool; +} ____cacheline_aligned; + /* Per-instance private data structure */ struct enic { struct net_device *netdev; @@ -139,8 +197,8 @@ struct enic { struct work_struct reset; struct work_struct tx_hang_reset; struct work_struct change_mtu_work; - struct msix_entry msix_entry[ENIC_INTR_MAX]; - struct enic_msix_entry msix[ENIC_INTR_MAX]; + struct msix_entry *msix_entry; + struct enic_msix_entry *msix; u32 msg_enable; spinlock_t devcmd_lock; u8 mac_addr[ETH_ALEN]; @@ -159,33 +217,30 @@ struct enic { bool enic_api_busy; struct enic_port_profile *pp; - /* work queue cache line section */ - ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; - spinlock_t 
wq_lock[ENIC_WQ_MAX]; + struct enic_wq *wq; + unsigned int wq_avail; unsigned int wq_count; u16 loop_enable; u16 loop_tag; - /* receive queue cache line section */ - ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; + struct enic_rq *rq; + unsigned int rq_avail; unsigned int rq_count; struct vxlan_offload vxlan; - u64 rq_truncated_pkts; - u64 rq_bad_fcs; - struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX]; + struct napi_struct *napi; - /* interrupt resource cache line section */ - ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; + struct vnic_intr *intr; + unsigned int intr_avail; unsigned int intr_count; u32 __iomem *legacy_pba; /* memory-mapped */ - /* completion queue cache line section */ - ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; + struct vnic_cq *cq; + unsigned int cq_avail; unsigned int cq_count; struct enic_rfs_flw_tbl rfs_h; - u32 rx_copybreak; u8 rss_key[ENIC_RSS_LEN]; struct vnic_gen_stats gen_stats; + enum ext_cq ext_cq; }; static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev) @@ -238,18 +293,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic, return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; } -static inline unsigned int enic_msix_err_intr(struct enic *enic) -{ - return enic->rq_count + enic->wq_count; -} +/* MSIX interrupts are organized as the error interrupt, then the notify + * interrupt followed by all the I/O interrupts. The error interrupt needs + * to fit in 7 bits due to hardware constraints + */ +#define ENIC_MSIX_RESERVED_INTR 2 +#define ENIC_MSIX_ERR_INTR 0 +#define ENIC_MSIX_NOTIFY_INTR 1 +#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR +#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2) #define ENIC_LEGACY_IO_INTR 0 #define ENIC_LEGACY_ERR_INTR 1 #define ENIC_LEGACY_NOTIFY_INTR 2 +static inline unsigned int enic_msix_err_intr(struct enic *enic) +{ + return ENIC_MSIX_ERR_INTR; +} + static inline unsigned int enic_msix_notify_intr(struct enic *enic) { - return enic->rq_count + enic->wq_count + 1; + return ENIC_MSIX_NOTIFY_INTR; } static inline bool enic_is_err_intr(struct enic *enic, int intr) @@ -297,5 +362,6 @@ int enic_is_valid_vf(struct enic *enic, int vf); int enic_is_dynamic(struct enic *enic); void enic_set_ethtool_ops(struct net_device *netdev); int __enic_set_rsskey(struct enic *enic); +void enic_ext_cq(struct enic *enic); #endif /* _ENIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 788ba87eec80a..529160926a963 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -32,6 +32,41 @@ struct enic_stat { .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\ } +#define ENIC_PER_RQ_STAT(stat) { \ + .name = "rq[%d]_"#stat, \ + .index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \ +} + +#define ENIC_PER_WQ_STAT(stat) { \ + .name = "wq[%d]_"#stat, \ + .index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \ +} + +static const struct enic_stat enic_per_rq_stats[] = { + ENIC_PER_RQ_STAT(l4_rss_hash), + ENIC_PER_RQ_STAT(l3_rss_hash), + ENIC_PER_RQ_STAT(csum_unnecessary_encap), + ENIC_PER_RQ_STAT(vlan_stripped), + ENIC_PER_RQ_STAT(napi_complete), + ENIC_PER_RQ_STAT(napi_repoll), + ENIC_PER_RQ_STAT(no_skb), + ENIC_PER_RQ_STAT(desc_skip), +}; + +#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats) + +static const struct enic_stat enic_per_wq_stats[] = { + ENIC_PER_WQ_STAT(encap_tso), + ENIC_PER_WQ_STAT(encap_csum), + 
ENIC_PER_WQ_STAT(add_vlan), + ENIC_PER_WQ_STAT(cq_work), + ENIC_PER_WQ_STAT(cq_bytes), + ENIC_PER_WQ_STAT(null_pkt), + ENIC_PER_WQ_STAT(skb_linear_fail), + ENIC_PER_WQ_STAT(desc_full_awake), +}; + +#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats) static const struct enic_stat enic_tx_stats[] = { ENIC_TX_STAT(tx_frames_ok), ENIC_TX_STAT(tx_unicast_frames_ok), @@ -46,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = { ENIC_TX_STAT(tx_tso), }; +#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats) + static const struct enic_stat enic_rx_stats[] = { ENIC_RX_STAT(rx_frames_ok), ENIC_RX_STAT(rx_frames_total), @@ -70,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = { ENIC_RX_STAT(rx_frames_to_max), }; +#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats) + static const struct enic_stat enic_gen_stats[] = { ENIC_GEN_STAT(dma_map_error), }; -static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); -static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); -static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats); +#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats) static void enic_intr_coal_set_rx(struct enic *enic, u32 timer) { @@ -141,22 +178,38 @@ static void enic_get_drvinfo(struct net_device *netdev, static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct enic *enic = netdev_priv(netdev); unsigned int i; + unsigned int j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < enic_n_tx_stats; i++) { + for (i = 0; i < NUM_ENIC_TX_STATS; i++) { memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } - for (i = 0; i < enic_n_rx_stats; i++) { + for (i = 0; i < NUM_ENIC_RX_STATS; i++) { memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } - for (i = 0; i < enic_n_gen_stats; i++) { + for (i = 0; i < NUM_ENIC_GEN_STATS; i++) { memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } + for (i = 0; i < enic->rq_count; i++) { + for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) { + snprintf(data, ETH_GSTRING_LEN, + enic_per_rq_stats[j].name, i); + data += ETH_GSTRING_LEN; + } + } + for (i = 0; i < enic->wq_count; i++) { + for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) { + snprintf(data, ETH_GSTRING_LEN, + enic_per_wq_stats[j].name, i); + data += ETH_GSTRING_LEN; + } + } break; } } @@ -169,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct vnic_enet_config *c = &enic->config; - ring->rx_max_pending = ENIC_MAX_RQ_DESCS; + ring->rx_max_pending = c->max_rq_ring; ring->rx_pending = c->rq_desc_count; - ring->tx_max_pending = ENIC_MAX_WQ_DESCS; + ring->tx_max_pending = c->max_wq_ring; ring->tx_pending = c->wq_desc_count; } @@ -199,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev, } rx_pending = c->rq_desc_count; tx_pending = c->wq_desc_count; - if (ring->rx_pending > ENIC_MAX_RQ_DESCS || + if (ring->rx_pending > c->max_rq_ring || ring->rx_pending < ENIC_MIN_RQ_DESCS) { netdev_info(netdev, "rx pending (%u) not in range [%u,%u]", ring->rx_pending, ENIC_MIN_RQ_DESCS, - ENIC_MAX_RQ_DESCS); + c->max_rq_ring); return -EINVAL; } - if (ring->tx_pending > ENIC_MAX_WQ_DESCS || + if (ring->tx_pending > c->max_wq_ring || ring->tx_pending < ENIC_MIN_WQ_DESCS) { netdev_info(netdev, "tx pending (%u) not in range [%u,%u]", ring->tx_pending, ENIC_MIN_WQ_DESCS, - ENIC_MAX_WQ_DESCS); + c->max_wq_ring); return -EINVAL; } if (running) @@ -242,9 +295,19 @@ static int 
enic_set_ringparam(struct net_device *netdev, static int enic_get_sset_count(struct net_device *netdev, int sset) { + struct enic *enic = netdev_priv(netdev); + unsigned int n_per_rq_stats; + unsigned int n_per_wq_stats; + unsigned int n_stats; + switch (sset) { case ETH_SS_STATS: - return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats; + n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count; + n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count; + n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS + + NUM_ENIC_GEN_STATS + + n_per_rq_stats + n_per_wq_stats; + return n_stats; default: return -EOPNOTSUPP; } @@ -256,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct vnic_stats *vstats; unsigned int i; + unsigned int j; int err; err = enic_dev_stats_dump(enic, &vstats); @@ -266,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev, if (err == -ENOMEM) return; - for (i = 0; i < enic_n_tx_stats; i++) + for (i = 0; i < NUM_ENIC_TX_STATS; i++) *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; - for (i = 0; i < enic_n_rx_stats; i++) + for (i = 0; i < NUM_ENIC_RX_STATS; i++) *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index]; - for (i = 0; i < enic_n_gen_stats; i++) + for (i = 0; i < NUM_ENIC_GEN_STATS; i++) *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index]; + for (i = 0; i < enic->rq_count; i++) { + struct enic_rq_stats *rqstats = &enic->rq[i].stats; + int index; + + for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) { + index = enic_per_rq_stats[j].index; + *(data++) = ((u64 *)rqstats)[index]; + } + } + for (i = 0; i < enic->wq_count; i++) { + struct enic_wq_stats *wqstats = &enic->wq[i].stats; + int index; + + for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) { + index = enic_per_wq_stats[j].index; + *(data++) = ((u64 *)wqstats)[index]; + } + } } static u32 enic_get_msglevel(struct net_device *netdev) @@ -526,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -static int enic_get_tunable(struct net_device *dev, - const struct ethtool_tunable *tuna, void *data) -{ - struct enic *enic = netdev_priv(dev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - *(u32 *)data = enic->rx_copybreak; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static int enic_set_tunable(struct net_device *dev, - const struct ethtool_tunable *tuna, - const void *data) -{ - struct enic *enic = netdev_priv(dev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - enic->rx_copybreak = *(u32 *)data; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - static u32 enic_get_rxfh_key_size(struct net_device *netdev) { return ENIC_RSS_LEN; @@ -606,6 +651,28 @@ static int enic_get_ts_info(struct net_device *netdev, return 0; } +static void enic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct enic *enic = netdev_priv(netdev); + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_MSIX: + channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX); + channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX); + channels->rx_count = enic->rq_count; + channels->tx_count = enic->wq_count; + break; + case VNIC_DEV_INTR_MODE_MSI: + case VNIC_DEV_INTR_MODE_INTX: + channels->max_combined = 1; + channels->combined_count = 1; + break; + default: + break; + } +} + static const struct ethtool_ops enic_ethtool_ops = { .supported_coalesce_params = 
ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | @@ -623,13 +690,12 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_coalesce = enic_get_coalesce, .set_coalesce = enic_set_coalesce, .get_rxnfc = enic_get_rxnfc, - .get_tunable = enic_get_tunable, - .set_tunable = enic_set_tunable, .get_rxfh_key_size = enic_get_rxfh_key_size, .get_rxfh = enic_get_rxfh, .set_rxfh = enic_set_rxfh, .get_link_ksettings = enic_get_ksettings, .get_ts_info = enic_get_ts_info, + .get_channels = enic_get_channels, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index f604119efc809..702305e82fa37 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -46,6 +46,7 @@ #include #include #include +#include #include "cq_enet_desc.h" #include "vnic_dev.h" @@ -57,18 +58,15 @@ #include "enic_dev.h" #include "enic_pp.h" #include "enic_clsf.h" +#include "enic_rq.h" +#include "enic_wq.h" #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) -#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) -#define MAX_TSO (1 << 16) -#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ -#define RX_COPYBREAK_DEFAULT 256 - /* Supported devices */ static const struct pci_device_id enic_id_table[] = { { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, @@ -108,7 +106,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = { static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { {0, 0}, /* 0 - 4 Gbps */ {0, 3}, /* 4 - 10 Gbps */ - {3, 6}, /* 10 - 40 Gbps */ + {3, 6}, /* 10+ Gbps */ }; static void enic_init_affinity_hint(struct enic *enic) @@ -321,48 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf) #endif } -static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) -{ - struct enic *enic = vnic_dev_priv(wq->vdev); - - if (buf->sop) - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_TO_DEVICE); - else - dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_TO_DEVICE); - - if (buf->os_buf) - dev_kfree_skb_any(buf->os_buf); -} - -static void enic_wq_free_buf(struct vnic_wq *wq, - struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) -{ - enic_free_wq_buf(wq, buf); -} - -static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, - u8 type, u16 q_number, u16 completed_index, void *opaque) -{ - struct enic *enic = vnic_dev_priv(vdev); - - spin_lock(&enic->wq_lock[q_number]); - - vnic_wq_service(&enic->wq[q_number], cq_desc, - completed_index, enic_wq_free_buf, - opaque); - - if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && - vnic_wq_desc_avail(&enic->wq[q_number]) >= - (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) - netif_wake_subqueue(enic->netdev, q_number); - - spin_unlock(&enic->wq_lock[q_number]); - - return 0; -} - static bool enic_log_q_error(struct enic *enic) { unsigned int i; @@ -370,7 +326,7 @@ static bool enic_log_q_error(struct enic *enic) bool err = false; for (i = 0; i < enic->wq_count; i++) { - error_status = vnic_wq_error_status(&enic->wq[i]); + error_status = vnic_wq_error_status(&enic->wq[i].vwq); err |= error_status; if (error_status) netdev_err(enic->netdev, "WQ[%d] error_status %d\n", @@ -378,7 +334,7 @@ 
static bool enic_log_q_error(struct enic *enic) } for (i = 0; i < enic->rq_count; i++) { - error_status = vnic_rq_error_status(&enic->rq[i]); + error_status = vnic_rq_error_status(&enic->rq[i].vrq); err |= error_status; if (error_status) netdev_err(enic->netdev, "RQ[%d] error_status %d\n", @@ -421,6 +377,36 @@ static void enic_mtu_check(struct enic *enic) } } +static void enic_set_rx_coal_setting(struct enic *enic) +{ + unsigned int speed; + int index = -1; + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + + /* 1. Read the link speed from fw + * 2. Pick the default range for the speed + * 3. Update it in enic->rx_coalesce_setting + */ + speed = vnic_dev_port_speed(enic->vdev); + if (speed > ENIC_LINK_SPEED_10G) + index = ENIC_LINK_40G_INDEX; + else if (speed > ENIC_LINK_SPEED_4G) + index = ENIC_LINK_10G_INDEX; + else + index = ENIC_LINK_4G_INDEX; + + rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; + rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; + rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; + + /* Start with the value provided by UCSM */ + for (index = 0; index < enic->rq_count; index++) + enic->cq[index].cur_rx_coal_timeval = + enic->config.intr_timer_usec; + + rx_coal->use_adaptive_rx_coalesce = 1; +} + static void enic_link_check(struct enic *enic) { int link_status = vnic_dev_link_status(enic->vdev); @@ -429,6 +415,7 @@ static void enic_link_check(struct enic *enic) if (link_status && !carrier_ok) { netdev_info(enic->netdev, "Link UP\n"); netif_carrier_on(enic->netdev); + enic_set_rx_coal_setting(enic); } else if (!link_status && carrier_ok) { netdev_info(enic->netdev, "Link DOWN\n"); netif_carrier_off(enic->netdev); @@ -590,6 +577,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + /* The enic_queue_wq_desc() above does not do HW checksum */ + enic->wq[wq->index].stats.csum_none++; + enic->wq[wq->index].stats.packets++; + enic->wq[wq->index].stats.bytes += skb->len; + return err; } @@ -622,6 +614,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + enic->wq[wq->index].stats.csum_partial++; + enic->wq[wq->index].stats.packets++; + enic->wq[wq->index].stats.bytes += skb->len; + return err; } @@ -676,15 +672,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, unsigned int offset = 0; unsigned int hdr_len; dma_addr_t dma_addr; + unsigned int pkts; unsigned int len; skb_frag_t *frag; if (skb->encapsulation) { hdr_len = skb_inner_tcp_all_headers(skb); enic_preload_tcp_csum_encap(skb); + enic->wq[wq->index].stats.encap_tso++; } else { hdr_len = skb_tcp_all_headers(skb); enic_preload_tcp_csum(skb); + enic->wq[wq->index].stats.tso++; } /* Queue WQ_ENET_MAX_DESC_LEN length descriptors @@ -705,7 +704,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, } if (eop) - return 0; + goto tso_out_stats; /* Queue WQ_ENET_MAX_DESC_LEN length descriptors * for additional data fragments @@ -732,6 +731,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, } } +tso_out_stats: + /* calculate how many packets tso sent */ + len = skb->len - hdr_len; + pkts = len / mss; + if ((len % mss) > 0) + pkts++; + enic->wq[wq->index].stats.packets += pkts; + enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len)); + return 0; } @@ -764,6 +772,10 @@ static inline int 
enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, if (!eop) err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + enic->wq[wq->index].stats.encap_csum++; + enic->wq[wq->index].stats.packets++; + enic->wq[wq->index].stats.bytes += skb->len; + return err; } @@ -780,6 +792,7 @@ static inline int enic_queue_wq_skb(struct enic *enic, /* VLAN tag from trunking driver */ vlan_tag_insert = 1; vlan_tag = skb_vlan_tag_get(skb); + enic->wq[wq->index].stats.add_vlan++; } else if (enic->loop_enable) { vlan_tag = enic->loop_tag; loopback = 1; @@ -792,7 +805,7 @@ static inline int enic_queue_wq_skb(struct enic *enic, else if (skb->encapsulation) err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); - else if (skb->ip_summed == CHECKSUM_PARTIAL) + else if (skb->ip_summed == CHECKSUM_PARTIAL) err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); else @@ -825,13 +838,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, unsigned int txq_map; struct netdev_queue *txq; + txq_map = skb_get_queue_mapping(skb) % enic->wq_count; + wq = &enic->wq[txq_map].vwq; + if (skb->len <= 0) { dev_kfree_skb_any(skb); + enic->wq[wq->index].stats.null_pkt++; return NETDEV_TX_OK; } - txq_map = skb_get_queue_mapping(skb) % enic->wq_count; - wq = &enic->wq[txq_map]; txq = netdev_get_tx_queue(netdev, txq_map); /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, @@ -843,31 +858,35 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && skb_linearize(skb)) { dev_kfree_skb_any(skb); + enic->wq[wq->index].stats.skb_linear_fail++; return NETDEV_TX_OK; } - spin_lock(&enic->wq_lock[txq_map]); + spin_lock(&enic->wq[txq_map].lock); if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { netif_tx_stop_queue(txq); /* This is a hard error, log it */ netdev_err(netdev, "BUG! 
Tx ring full when queue awake!\n"); - spin_unlock(&enic->wq_lock[txq_map]); + spin_unlock(&enic->wq[txq_map].lock); + enic->wq[wq->index].stats.desc_full_awake++; return NETDEV_TX_BUSY; } if (enic_queue_wq_skb(enic, wq, skb)) goto error; - if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) + if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) { netif_tx_stop_queue(txq); + enic->wq[wq->index].stats.stopped++; + } skb_tx_timestamp(skb); if (!netdev_xmit_more() || netif_xmit_stopped(txq)) vnic_wq_doorbell(wq); error: - spin_unlock(&enic->wq_lock[txq_map]); + spin_unlock(&enic->wq[txq_map].lock); return NETDEV_TX_OK; } @@ -878,7 +897,10 @@ static void enic_get_stats(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; + u64 pkt_truncated = 0; + u64 bad_fcs = 0; int err; + int i; err = enic_dev_stats_dump(enic, &stats); /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump @@ -897,8 +919,17 @@ static void enic_get_stats(struct net_device *netdev, net_stats->rx_bytes = stats->rx.rx_bytes_ok; net_stats->rx_errors = stats->rx.rx_errors; net_stats->multicast = stats->rx.rx_multicast_frames_ok; - net_stats->rx_over_errors = enic->rq_truncated_pkts; - net_stats->rx_crc_errors = enic->rq_bad_fcs; + + for (i = 0; i < enic->rq_count; i++) { + struct enic_rq_stats *rqs = &enic->rq[i].stats; + + if (!enic->rq[i].vrq.ctrl) + break; + pkt_truncated += rqs->pkt_truncated; + bad_fcs += rqs->bad_fcs; + } + net_stats->rx_over_errors = pkt_truncated; + net_stats->rx_crc_errors = bad_fcs; net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; } @@ -1117,18 +1148,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); if (port[IFLA_PORT_PROFILE]) { + if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_NAME; memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), PORT_PROFILE_MAX); } if (port[IFLA_PORT_INSTANCE_UUID]) { + if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_INSTANCE; memcpy(pp->instance_uuid, nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); } if (port[IFLA_PORT_HOST_UUID]) { + if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_HOST; memcpy(pp->host_uuid, nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); @@ -1219,230 +1262,6 @@ static int enic_get_vf_port(struct net_device *netdev, int vf, return -EMSGSIZE; } -static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) -{ - struct enic *enic = vnic_dev_priv(rq->vdev); - - if (!buf->os_buf) - return; - - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - dev_kfree_skb_any(buf->os_buf); - buf->os_buf = NULL; -} - -static int enic_rq_alloc_buf(struct vnic_rq *rq) -{ - struct enic *enic = vnic_dev_priv(rq->vdev); - struct net_device *netdev = enic->netdev; - struct sk_buff *skb; - unsigned int len = netdev->mtu + VLAN_ETH_HLEN; - unsigned int os_buf_index = 0; - dma_addr_t dma_addr; - struct vnic_rq_buf *buf = rq->to_use; - - if (buf->os_buf) { - enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, - buf->len); - - return 0; - } - skb = netdev_alloc_skb_ip_align(netdev, len); - if (!skb) - return -ENOMEM; - - dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len, - 
DMA_FROM_DEVICE); - if (unlikely(enic_dma_map_check(enic, dma_addr))) { - dev_kfree_skb(skb); - return -ENOMEM; - } - - enic_queue_rq_desc(rq, skb, os_buf_index, - dma_addr, len); - - return 0; -} - -static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, - u32 pkt_len) -{ - if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len) - pkt_size->large_pkt_bytes_cnt += pkt_len; - else - pkt_size->small_pkt_bytes_cnt += pkt_len; -} - -static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb, - struct vnic_rq_buf *buf, u16 len) -{ - struct enic *enic = netdev_priv(netdev); - struct sk_buff *new_skb; - - if (len > enic->rx_copybreak) - return false; - new_skb = netdev_alloc_skb_ip_align(netdev, len); - if (!new_skb) - return false; - dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len, - DMA_FROM_DEVICE); - memcpy(new_skb->data, (*skb)->data, len); - *skb = new_skb; - - return true; -} - -static void enic_rq_indicate_buf(struct vnic_rq *rq, - struct cq_desc *cq_desc, struct vnic_rq_buf *buf, - int skipped, void *opaque) -{ - struct enic *enic = vnic_dev_priv(rq->vdev); - struct net_device *netdev = enic->netdev; - struct sk_buff *skb; - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - - u8 type, color, eop, sop, ingress_port, vlan_stripped; - u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; - u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; - u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; - u8 packet_error; - u16 q_number, completed_index, bytes_written, vlan_tci, checksum; - u32 rss_hash; - bool outer_csum_ok = true, encap = false; - - if (skipped) - return; - - skb = buf->os_buf; - - cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &ingress_port, &fcoe, &eop, &sop, &rss_type, - &csum_not_calc, &rss_hash, &bytes_written, - &packet_error, &vlan_stripped, &vlan_tci, &checksum, - &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, - &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, - &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, - &fcs_ok); - - if (packet_error) { - - if (!fcs_ok) { - if (bytes_written > 0) - enic->rq_bad_fcs++; - else if (bytes_written == 0) - enic->rq_truncated_pkts++; - } - - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - buf->os_buf = NULL; - - return; - } - - if (eop && bytes_written > 0) { - - /* Good receive - */ - - if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) { - buf->os_buf = NULL; - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, - buf->len, DMA_FROM_DEVICE); - } - prefetch(skb->data - NET_IP_ALIGN); - - skb_put(skb, bytes_written); - skb->protocol = eth_type_trans(skb, netdev); - skb_record_rx_queue(skb, q_number); - if ((netdev->features & NETIF_F_RXHASH) && rss_hash && - (type == 3)) { - switch (rss_type) { - case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4: - case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: - case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX: - skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4); - break; - case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4: - case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6: - case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX: - skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3); - break; - } - } - if (enic->vxlan.vxlan_udp_port_number) { - switch (enic->vxlan.patch_level) { - case 0: - if (fcoe) { - encap = true; - outer_csum_ok = fcoe_fc_crc_ok; - } - break; - case 2: - if ((type == 7) && - (rss_hash & BIT(0))) { - encap = true; - outer_csum_ok = (rss_hash & BIT(1)) && - (rss_hash & BIT(2)); - } - break; - } - } 
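The receive-checksum test deleted below (and re-added in enic_rq.c later in this patch) reduces to a single predicate: mark CHECKSUM_UNNECESSARY only when hw calculated and validated every checksum in the frame. A minimal sketch of that predicate, reusing the flag names from the surrounding hunks (the helper itself is illustrative, not driver code):

    /* illustrative sketch, not part of the patch */
    static bool enic_rx_csum_usable(struct net_device *netdev, u8 csum_not_calc,
                                    u8 tcp_udp_csum_ok, bool outer_csum_ok,
                                    u8 ipv4_csum_ok, u8 ipv6)
    {
            /* IPv6 has no L3 header checksum, so ipv4_csum_ok only matters
             * for v4; outer_csum_ok covers the outer UDP header of
             * encapsulated frames.
             */
            return (netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
                   tcp_udp_csum_ok && outer_csum_ok &&
                   (ipv4_csum_ok || ipv6);
    }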
- - /* Hardware does not provide whole packet checksum. It only - * provides pseudo checksum. Since hw validates the packet - * checksum but not provide us the checksum value. use - * CHECSUM_UNNECESSARY. - * - * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is - * inner csum_ok. outer_csum_ok is set by hw when outer udp - * csum is correct or is zero. - */ - if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && - tcp_udp_csum_ok && outer_csum_ok && - (ipv4_csum_ok || ipv6)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = encap; - } - - if (vlan_stripped) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); - - skb_mark_napi_id(skb, &enic->napi[rq->index]); - if (!(netdev->features & NETIF_F_GRO)) - netif_receive_skb(skb); - else - napi_gro_receive(&enic->napi[q_number], skb); - if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_intr_update_pkt_size(&cq->pkt_size_counter, - bytes_written); - } else { - - /* Buffer overflow - */ - - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - buf->os_buf = NULL; - } -} - -static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, - u8 type, u16 q_number, u16 completed_index, void *opaque) -{ - struct enic *enic = vnic_dev_priv(vdev); - - vnic_rq_service(&enic->rq[q_number], cq_desc, - completed_index, VNIC_RQ_RETURN_DESC, - enic_rq_indicate_buf, opaque); - - return 0; -} - static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) { unsigned int intr = enic_msix_rq_intr(enic, rq->index); @@ -1513,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget) unsigned int work_done, rq_work_done = 0, wq_work_done; int err; - wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, - enic_wq_service, NULL); + wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do); if (budget > 0) - rq_work_done = vnic_cq_service(&enic->cq[cq_rq], - rq_work_to_do, enic_rq_service, NULL); + rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do); /* Accumulate intr event credits for this polling * cycle. An intr event is the completion of a @@ -1533,7 +1350,7 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't unmask intr */, 0 /* don't reset intr timer */); - err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); + err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. @@ -1545,7 +1362,7 @@ static int enic_poll(struct napi_struct *napi, int budget) /* Call the function which refreshes the intr coalescing timer * value based on the traffic. 
*/ - enic_calc_int_moderation(enic, &enic->rq[0]); + enic_calc_int_moderation(enic, &enic->rq[0].vrq); if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) { @@ -1554,8 +1371,11 @@ static int enic_poll(struct napi_struct *napi, int budget) */ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_set_int_moderation(enic, &enic->rq[0]); + enic_set_int_moderation(enic, &enic->rq[0].vrq); vnic_intr_unmask(&enic->intr[intr]); + enic->rq[0].stats.napi_complete++; + } else { + enic->rq[0].stats.napi_repoll++; } return rq_work_done; @@ -1604,7 +1424,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget) struct net_device *netdev = napi->dev; struct enic *enic = netdev_priv(netdev); unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; - struct vnic_wq *wq = &enic->wq[wq_index]; + struct vnic_wq *wq = &enic->wq[wq_index].vwq; unsigned int cq; unsigned int intr; unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET; @@ -1614,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget) wq_irq = wq->index; cq = enic_cq_wq(enic, wq_irq); intr = enic_msix_wq_intr(enic, wq_irq); - wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, - enic_wq_service, NULL); + + wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do); vnic_intr_return_credits(&enic->intr[intr], wq_work_done, 0 /* don't unmask intr */, @@ -1644,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ if (budget > 0) - work_done = vnic_cq_service(&enic->cq[cq], - work_to_do, enic_rq_service, NULL); + work_done = enic_rq_cq_service(enic, cq, work_to_do); /* Return intr event credits for this polling * cycle. An intr event is the completion of a @@ -1658,7 +1477,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) 0 /* don't unmask intr */, 0 /* don't reset intr timer */); - err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); + err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling mode * so we can try to fill the ring again. @@ -1670,7 +1489,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) /* Call the function which refreshes the intr coalescing timer * value based on the traffic. 
*/ - enic_calc_int_moderation(enic, &enic->rq[rq]); + enic_calc_int_moderation(enic, &enic->rq[rq].vrq); if ((work_done < budget) && napi_complete_done(napi, work_done)) { @@ -1679,8 +1498,11 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_set_int_moderation(enic, &enic->rq[rq]); + enic_set_int_moderation(enic, &enic->rq[rq].vrq); vnic_intr_unmask(&enic->intr[intr]); + enic->rq[rq].stats.napi_complete++; + } else { + enic->rq[rq].stats.napi_repoll++; } return work_done; @@ -1710,7 +1532,7 @@ static void enic_free_intr(struct enic *enic) free_irq(enic->pdev->irq, enic); break; case VNIC_DEV_INTR_MODE_MSIX: - for (i = 0; i < ARRAY_SIZE(enic->msix); i++) + for (i = 0; i < enic->intr_count; i++) if (enic->msix[i].requested) free_irq(enic->msix_entry[i].vector, enic->msix[i].devid); @@ -1777,7 +1599,7 @@ static int enic_request_intr(struct enic *enic) enic->msix[intr].isr = enic_isr_msix_notify; enic->msix[intr].devid = enic; - for (i = 0; i < ARRAY_SIZE(enic->msix); i++) + for (i = 0; i < enic->intr_count; i++) enic->msix[i].requested = 0; for (i = 0; i < enic->intr_count; i++) { @@ -1819,36 +1641,6 @@ static void enic_synchronize_irqs(struct enic *enic) } } -static void enic_set_rx_coal_setting(struct enic *enic) -{ - unsigned int speed; - int index = -1; - struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - - /* 1. Read the link speed from fw - * 2. Pick the default range for the speed - * 3. Update it in enic->rx_coalesce_setting - */ - speed = vnic_dev_port_speed(enic->vdev); - if (ENIC_LINK_SPEED_10G < speed) - index = ENIC_LINK_40G_INDEX; - else if (ENIC_LINK_SPEED_4G < speed) - index = ENIC_LINK_10G_INDEX; - else - index = ENIC_LINK_4G_INDEX; - - rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; - rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; - rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; - - /* Start with the value provided by UCSM */ - for (index = 0; index < enic->rq_count; index++) - enic->cq[index].cur_rx_coal_timeval = - enic->config.intr_timer_usec; - - rx_coal->use_adaptive_rx_coalesce = 1; -} - static int enic_dev_notify_set(struct enic *enic) { int err; @@ -1889,6 +1681,17 @@ static int enic_open(struct net_device *netdev) struct enic *enic = netdev_priv(netdev); unsigned int i; int err, ret; + unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN; + struct page_pool_params pp_params = { + .order = get_order(max_pkt_len), + .pool_size = enic->config.rq_desc_count, + .nid = dev_to_node(&enic->pdev->dev), + .dev = &enic->pdev->dev, + .dma_dir = DMA_FROM_DEVICE, + .max_len = (max_pkt_len > PAGE_SIZE) ? 
max_pkt_len : PAGE_SIZE, + .netdev = netdev, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + }; err = enic_request_intr(enic); if (err) { @@ -1906,11 +1709,20 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->rq_count; i++) { + /* create a page pool for each RQ */ + pp_params.napi = &enic->napi[i]; + enic->rq[i].pool = page_pool_create(&pp_params); + if (IS_ERR(enic->rq[i].pool)) { + err = PTR_ERR(enic->rq[i].pool); + enic->rq[i].pool = NULL; + goto err_out_free_rq; + } + /* enable rq before updating rq desc */ - vnic_rq_enable(&enic->rq[i]); - vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); + vnic_rq_enable(&enic->rq[i].vrq); + vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ - if (vnic_rq_desc_used(&enic->rq[i]) == 0) { + if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) { netdev_err(netdev, "Unable to alloc receive buffers\n"); err = -ENOMEM; goto err_out_free_rq; @@ -1918,7 +1730,7 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->wq_count; i++) - vnic_wq_enable(&enic->wq[i]); + vnic_wq_enable(&enic->wq[i].vwq); if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_add_station_addr(enic); @@ -1945,9 +1757,12 @@ static int enic_open(struct net_device *netdev) err_out_free_rq: for (i = 0; i < enic->rq_count; i++) { - ret = vnic_rq_disable(&enic->rq[i]); - if (!ret) - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + ret = vnic_rq_disable(&enic->rq[i].vrq); + if (!ret) { + vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf); + page_pool_destroy(enic->rq[i].pool); + enic->rq[i].pool = NULL; + } } enic_dev_notify_unset(enic); err_out_free_intr: @@ -1989,12 +1804,12 @@ static int enic_stop(struct net_device *netdev) enic_dev_del_station_addr(enic); for (i = 0; i < enic->wq_count; i++) { - err = vnic_wq_disable(&enic->wq[i]); + err = vnic_wq_disable(&enic->wq[i].vwq); if (err) return err; } for (i = 0; i < enic->rq_count; i++) { - err = vnic_rq_disable(&enic->rq[i]); + err = vnic_rq_disable(&enic->rq[i].vrq); if (err) return err; } @@ -2004,9 +1819,12 @@ static int enic_stop(struct net_device *netdev) enic_free_intr(enic); for (i = 0; i < enic->wq_count; i++) - vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); - for (i = 0; i < enic->rq_count; i++) - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf); + for (i = 0; i < enic->rq_count; i++) { + vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf); + page_pool_destroy(enic->rq[i].pool); + enic->rq[i].pool = NULL; + } for (i = 0; i < enic->cq_count; i++) vnic_cq_clean(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++) @@ -2045,10 +1863,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; - if (netdev->mtu > enic->port_mtu) + if (new_mtu > enic->port_mtu) netdev_warn(netdev, "interface MTU (%d) set higher than port MTU (%d)\n", - netdev->mtu, enic->port_mtu); + new_mtu, enic->port_mtu); return _enic_change_mtu(netdev, new_mtu); } @@ -2322,6 +2140,7 @@ static void enic_reset(struct work_struct *work) enic_init_vnic_resources(enic); enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_ext_cq(enic); enic_open(enic->netdev); /* Allow infiniband to fiddle with the device again */ @@ -2348,6 +2167,7 @@ static void enic_tx_hang_reset(struct work_struct *work) enic_init_vnic_resources(enic); enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_ext_cq(enic); 
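The enic_set_intr_mode() rework in the hunk just below leans on the semantics of pci_enable_msix_range(), which allocates any vector count in [minvec, maxvec] and returns the count actually granted, or a negative errno. A minimal sketch of that negotiation, with hypothetical variable names (illustration only, not part of the patch):

    /* illustrative sketch, not part of the patch */
    int granted = pci_enable_msix_range(pdev, msix_entries,
                                        ENIC_MSIX_MIN_INTR, intr_avail);
    if (granted > 0) {
            /* the device may grant fewer vectors than requested, so
             * shrink the usable interrupt budget to what we got
             */
            intr_avail = granted;
            vnic_dev_set_intr_mode(vdev, VNIC_DEV_INTR_MODE_MSIX);
    } else {
            /* granted < 0: no MSI-X; fall back to MSI, then INTx,
             * exactly as the rewritten function does
             */
    }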
enic_open(enic->netdev); /* Allow infiniband to fiddle with the device again */ @@ -2360,112 +2180,56 @@ static void enic_tx_hang_reset(struct work_struct *work) static int enic_set_intr_mode(struct enic *enic) { - unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); - unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); unsigned int i; + int num_intr; /* Set interrupt mode (INTx, MSI, MSI-X) depending * on system capabilities. * * Try MSI-X first - * - * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs - * (the second to last INTR is used for WQ/RQ errors) - * (the last INTR is used for notifications) - */ - - BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); - for (i = 0; i < n + m + 2; i++) - enic->msix_entry[i].entry = i; - - /* Use multiple RQs if RSS is enabled */ - if (ENIC_SETTING(enic, RSS) && - enic->config.intr_mode < 1 && - enic->rq_count >= n && - enic->wq_count >= m && - enic->cq_count >= n + m && - enic->intr_count >= n + m + 2) { - - if (pci_enable_msix_range(enic->pdev, enic->msix_entry, - n + m + 2, n + m + 2) > 0) { - - enic->rq_count = n; - enic->wq_count = m; - enic->cq_count = n + m; - enic->intr_count = n + m + 2; - - vnic_dev_set_intr_mode(enic->vdev, - VNIC_DEV_INTR_MODE_MSIX); - - return 0; - } - } - if (enic->config.intr_mode < 1 && - enic->rq_count >= 1 && - enic->wq_count >= m && - enic->cq_count >= 1 + m && - enic->intr_count >= 1 + m + 2) { - if (pci_enable_msix_range(enic->pdev, enic->msix_entry, - 1 + m + 2, 1 + m + 2) > 0) { - - enic->rq_count = 1; - enic->wq_count = m; - enic->cq_count = 1 + m; - enic->intr_count = 1 + m + 2; - + enic->intr_avail >= ENIC_MSIX_MIN_INTR) { + for (i = 0; i < enic->intr_avail; i++) + enic->msix_entry[i].entry = i; + + num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry, + ENIC_MSIX_MIN_INTR, + enic->intr_avail); + if (num_intr > 0) { vnic_dev_set_intr_mode(enic->vdev, - VNIC_DEV_INTR_MODE_MSIX); - + VNIC_DEV_INTR_MODE_MSIX); + enic->intr_avail = num_intr; return 0; } } /* Next try MSI * - * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR + * We need 1 INTR */ if (enic->config.intr_mode < 2 && - enic->rq_count >= 1 && - enic->wq_count >= 1 && - enic->cq_count >= 2 && - enic->intr_count >= 1 && + enic->intr_avail >= 1 && !pci_enable_msi(enic->pdev)) { - - enic->rq_count = 1; - enic->wq_count = 1; - enic->cq_count = 2; - enic->intr_count = 1; - + enic->intr_avail = 1; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); - return 0; } /* Next try INTx * - * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs + * We need 3 INTRs * (the first INTR is used for WQ/RQ) * (the second INTR is used for WQ/RQ errors) * (the last INTR is used for notifications) */ if (enic->config.intr_mode < 3 && - enic->rq_count >= 1 && - enic->wq_count >= 1 && - enic->cq_count >= 2 && - enic->intr_count >= 3) { - - enic->rq_count = 1; - enic->wq_count = 1; - enic->cq_count = 2; - enic->intr_count = 3; - + enic->intr_avail >= 3) { + enic->intr_avail = 3; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); - return 0; } @@ -2490,6 +2254,127 @@ static void enic_clear_intr_mode(struct enic *enic) vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); } +static int enic_adjust_resources(struct enic *enic) +{ + unsigned int max_queues; + unsigned int rq_default; + unsigned int rq_avail; + unsigned int wq_avail; + + if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) { + dev_err(enic_get_dev(enic), + "Not enough resources available rq: %d wq: %d cq: %d\n", + enic->rq_avail, enic->wq_avail, + 
enic->cq_avail); + return -ENOSPC; + } + + if (is_kdump_kernel()) { + dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); + enic->rq_avail = 1; + enic->wq_avail = 1; + enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; + enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; + enic->config.mtu = min_t(u16, 1500, enic->config.mtu); + } + + /* if RSS isn't set, then we can only use one RQ */ + if (!ENIC_SETTING(enic, RSS)) + enic->rq_avail = 1; + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSI: + enic->rq_count = 1; + enic->wq_count = 1; + enic->cq_count = 2; + enic->intr_count = enic->intr_avail; + break; + case VNIC_DEV_INTR_MODE_MSIX: + /* Adjust the number of wqs/rqs/cqs/interrupts that will be + * used based on which resource is the most constrained + */ + wq_avail = min(enic->wq_avail, ENIC_WQ_MAX); + rq_default = max(netif_get_num_default_rss_queues(), + ENIC_RQ_MIN_DEFAULT); + rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default); + max_queues = min(enic->cq_avail, + enic->intr_avail - ENIC_MSIX_RESERVED_INTR); + if (wq_avail + rq_avail <= max_queues) { + enic->rq_count = rq_avail; + enic->wq_count = wq_avail; + } else { + /* recalculate wq/rq count */ + if (rq_avail < wq_avail) { + enic->rq_count = min(rq_avail, max_queues / 2); + enic->wq_count = max_queues - enic->rq_count; + } else { + enic->wq_count = min(wq_avail, max_queues / 2); + enic->rq_count = max_queues - enic->wq_count; + } + } + enic->cq_count = enic->rq_count + enic->wq_count; + enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR; + + break; + default: + dev_err(enic_get_dev(enic), "Unknown interrupt mode\n"); + return -EINVAL; + } + + return 0; +} + +static void enic_get_queue_stats_rx(struct net_device *dev, int idx, + struct netdev_queue_stats_rx *rxs) +{ + struct enic *enic = netdev_priv(dev); + struct enic_rq_stats *rqstats = &enic->rq[idx].stats; + + rxs->bytes = rqstats->bytes; + rxs->packets = rqstats->packets; + rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated; + rxs->hw_drop_overruns = rqstats->pkt_truncated; + rxs->csum_unnecessary = rqstats->csum_unnecessary + + rqstats->csum_unnecessary_encap; + rxs->alloc_fail = rqstats->pp_alloc_fail; +} + +static void enic_get_queue_stats_tx(struct net_device *dev, int idx, + struct netdev_queue_stats_tx *txs) +{ + struct enic *enic = netdev_priv(dev); + struct enic_wq_stats *wqstats = &enic->wq[idx].stats; + + txs->bytes = wqstats->bytes; + txs->packets = wqstats->packets; + txs->csum_none = wqstats->csum_none; + txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum + + wqstats->tso; + txs->hw_gso_packets = wqstats->tso; + txs->stop = wqstats->stopped; + txs->wake = wqstats->wake; +} + +static void enic_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rxs, + struct netdev_queue_stats_tx *txs) +{ + rxs->bytes = 0; + rxs->packets = 0; + rxs->hw_drops = 0; + rxs->hw_drop_overruns = 0; + rxs->csum_unnecessary = 0; + rxs->alloc_fail = 0; + txs->bytes = 0; + txs->packets = 0; + txs->csum_none = 0; + txs->needs_csum = 0; + txs->hw_gso_packets = 0; + txs->stop = 0; + txs->wake = 0; +} + static const struct net_device_ops enic_netdev_dynamic_ops = { .ndo_open = enic_open, .ndo_stop = enic_stop, @@ -2538,6 +2423,77 @@ static const struct net_device_ops enic_netdev_ops = { .ndo_features_check = enic_features_check, }; +static const struct netdev_stat_ops enic_netdev_stat_ops = { + .get_queue_stats_rx = enic_get_queue_stats_rx, + 
.get_queue_stats_tx = enic_get_queue_stats_tx, + .get_base_stats = enic_get_base_stats, +}; + +static void enic_free_enic_resources(struct enic *enic) +{ + kfree(enic->wq); + enic->wq = NULL; + + kfree(enic->rq); + enic->rq = NULL; + + kfree(enic->cq); + enic->cq = NULL; + + kfree(enic->napi); + enic->napi = NULL; + + kfree(enic->msix_entry); + enic->msix_entry = NULL; + + kfree(enic->msix); + enic->msix = NULL; + + kfree(enic->intr); + enic->intr = NULL; +} + +static int enic_alloc_enic_resources(struct enic *enic) +{ + enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL); + if (!enic->wq) + goto free_queues; + + enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL); + if (!enic->rq) + goto free_queues; + + enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL); + if (!enic->cq) + goto free_queues; + + enic->napi = kcalloc(enic->wq_avail + enic->rq_avail, + sizeof(struct napi_struct), GFP_KERNEL); + if (!enic->napi) + goto free_queues; + + enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry), + GFP_KERNEL); + if (!enic->msix_entry) + goto free_queues; + + enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry), + GFP_KERNEL); + if (!enic->msix) + goto free_queues; + + enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr), + GFP_KERNEL); + if (!enic->intr) + goto free_queues; + + return 0; + +free_queues: + enic_free_enic_resources(enic); + return -ENOMEM; +} + static void enic_dev_deinit(struct enic *enic) { unsigned int i; @@ -2555,18 +2511,7 @@ static void enic_dev_deinit(struct enic *enic) enic_free_vnic_resources(enic); enic_clear_intr_mode(enic); enic_free_affinity_hint(enic); -} - -static void enic_kdump_kernel_config(struct enic *enic) -{ - if (is_kdump_kernel()) { - dev_info(enic_get_dev(enic), "Running from within kdump kernel. 
Using minimal resources\n"); - enic->rq_count = 1; - enic->wq_count = 1; - enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; - enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; - enic->config.mtu = min_t(u16, 1500, enic->config.mtu); - } + enic_free_enic_resources(enic); } static int enic_dev_init(struct enic *enic) @@ -2598,19 +2543,28 @@ static int enic_dev_init(struct enic *enic) enic_get_res_counts(enic); - /* modify resource count if we are in kdump_kernel - */ - enic_kdump_kernel_config(enic); + enic_ext_cq(enic); - /* Set interrupt mode based on resource counts and system - * capabilities - */ + err = enic_alloc_enic_resources(enic); + if (err) { + dev_err(dev, "Failed to allocate enic resources\n"); + return err; + } + + /* Set interrupt mode based on system capabilities */ err = enic_set_intr_mode(enic); if (err) { dev_err(dev, "Failed to set intr mode based on resource " "counts and system capabilities, aborting\n"); - return err; + goto err_out_free_vnic_resources; + } + + /* Adjust resource counts based on most constrained resources */ + err = enic_adjust_resources(enic); + if (err) { + dev_err(dev, "Failed to adjust resources\n"); + goto err_out_free_vnic_resources; } /* Allocate and configure vNIC resources @@ -2652,6 +2606,7 @@ static int enic_dev_init(struct enic *enic) enic_free_affinity_hint(enic); enic_clear_intr_mode(enic); enic_free_vnic_resources(enic); + enic_free_enic_resources(enic); return err; } @@ -2851,13 +2806,12 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) timer_setup(&enic->notify_timer, enic_notify_timer, 0); enic_rfs_flw_tbl_init(enic); - enic_set_rx_coal_setting(enic); INIT_WORK(&enic->reset, enic_reset); INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); for (i = 0; i < enic->wq_count; i++) - spin_lock_init(&enic->wq_lock[i]); + spin_lock_init(&enic->wq[i].lock); /* Register net device */ @@ -2880,6 +2834,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &enic_netdev_dynamic_ops; else netdev->netdev_ops = &enic_netdev_ops; + netdev->stat_ops = &enic_netdev_stat_ops; netdev->watchdog_timeo = 2 * HZ; enic_set_ethtool_ops(netdev); @@ -2966,7 +2921,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(dev, "Cannot register net device, aborting\n"); goto err_out_dev_deinit; } - enic->rx_copybreak = RX_COPYBREAK_DEFAULT; return 0; diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c index 1c48aebdbab02..bbd3143ed73e7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.c +++ b/drivers/net/ethernet/cisco/enic/enic_res.c @@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic) GET_CONFIG(intr_timer_usec); GET_CONFIG(loop_tag); GET_CONFIG(num_arfs); + GET_CONFIG(max_rq_ring); + GET_CONFIG(max_wq_ring); + GET_CONFIG(max_cq_ring); + + if (!c->max_wq_ring) + c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT; + if (!c->max_rq_ring) + c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT; + if (!c->max_cq_ring) + c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT; c->wq_desc_count = - min_t(u32, ENIC_MAX_WQ_DESCS, - max_t(u32, ENIC_MIN_WQ_DESCS, - c->wq_desc_count)); + min_t(u32, c->max_wq_ring, + max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count)); c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ c->rq_desc_count = - min_t(u32, ENIC_MAX_RQ_DESCS, - max_t(u32, ENIC_MIN_RQ_DESCS, - c->rq_desc_count)); + min_t(u32, c->max_rq_ring, + max_t(u32, 
ENIC_MIN_RQ_DESCS, c->rq_desc_count)); c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ if (c->mtu == 0) c->mtu = 1500; - c->mtu = min_t(u16, ENIC_MAX_MTU, - max_t(u16, ENIC_MIN_MTU, - c->mtu)); + c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu)); c->intr_timer_usec = min_t(u32, c->intr_timer_usec, vnic_dev_get_intr_coal_timer_max(enic->vdev)); dev_info(enic_get_dev(enic), - "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n", - enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu); + "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n", + enic->mac_addr, c->wq_desc_count, c->rq_desc_count, + c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu); dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec " @@ -176,9 +183,9 @@ void enic_free_vnic_resources(struct enic *enic) unsigned int i; for (i = 0; i < enic->wq_count; i++) - vnic_wq_free(&enic->wq[i]); + vnic_wq_free(&enic->wq[i].vwq); for (i = 0; i < enic->rq_count; i++) - vnic_rq_free(&enic->rq[i]); + vnic_rq_free(&enic->rq[i].vrq); for (i = 0; i < enic->cq_count; i++) vnic_cq_free(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++) @@ -187,16 +194,21 @@ void enic_free_vnic_resources(struct enic *enic) void enic_get_res_counts(struct enic *enic) { - enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); - enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); - enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); - enic->intr_count = vnic_dev_get_res_count(enic->vdev, - RES_TYPE_INTR_CTRL); + enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); + enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); + enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); + enic->intr_avail = vnic_dev_get_res_count(enic->vdev, + RES_TYPE_INTR_CTRL); + + enic->wq_count = enic->wq_avail; + enic->rq_count = enic->rq_avail; + enic->cq_count = enic->cq_avail; + enic->intr_count = enic->intr_avail; dev_info(enic_get_dev(enic), "vNIC resources avail: wq %d rq %d cq %d intr %d\n", - enic->wq_count, enic->rq_count, - enic->cq_count, enic->intr_count); + enic->wq_avail, enic->rq_avail, + enic->cq_avail, enic->intr_avail); } void enic_init_vnic_resources(struct enic *enic) @@ -221,9 +233,12 @@ void enic_init_vnic_resources(struct enic *enic) switch (intr_mode) { case VNIC_DEV_INTR_MODE_INTX: + error_interrupt_enable = 1; + error_interrupt_offset = ENIC_LEGACY_ERR_INTR; + break; case VNIC_DEV_INTR_MODE_MSIX: error_interrupt_enable = 1; - error_interrupt_offset = enic->intr_count - 2; + error_interrupt_offset = enic_msix_err_intr(enic); break; default: error_interrupt_enable = 0; @@ -233,7 +248,7 @@ void enic_init_vnic_resources(struct enic *enic) for (i = 0; i < enic->rq_count; i++) { cq_index = i; - vnic_rq_init(&enic->rq[i], + vnic_rq_init(&enic->rq[i].vrq, cq_index, error_interrupt_enable, error_interrupt_offset); @@ -241,7 +256,7 @@ void enic_init_vnic_resources(struct enic *enic) for (i = 0; i < enic->wq_count; i++) { cq_index = enic->rq_count + i; - vnic_wq_init(&enic->wq[i], + vnic_wq_init(&enic->wq[i].vwq, cq_index, error_interrupt_enable, error_interrupt_offset); @@ -249,15 +264,15 @@ void enic_init_vnic_resources(struct enic *enic) /* Init CQ resources * - * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI - * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X + * All CQs point to INTR[0] for INTx, MSI + * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X */ for (i = 0; i < 
enic->cq_count; i++) { switch (intr_mode) { case VNIC_DEV_INTR_MODE_MSIX: - interrupt_offset = i; + interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i; break; default: interrupt_offset = 0; @@ -304,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic) int enic_alloc_vnic_resources(struct enic *enic) { enum vnic_dev_intr_mode intr_mode; + int rq_cq_desc_size; unsigned int i; int err; @@ -318,11 +334,29 @@ int enic_alloc_vnic_resources(struct enic *enic) intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" : "unknown"); + switch (enic->ext_cq) { + case ENIC_RQ_CQ_ENTRY_SIZE_16: + rq_cq_desc_size = 16; + break; + case ENIC_RQ_CQ_ENTRY_SIZE_32: + rq_cq_desc_size = 32; + break; + case ENIC_RQ_CQ_ENTRY_SIZE_64: + rq_cq_desc_size = 64; + break; + default: + dev_err(enic_get_dev(enic), + "Unable to determine rq cq desc size: %d", + enic->ext_cq); + err = -ENODEV; + goto err_out; + } + /* Allocate queue resources */ for (i = 0; i < enic->wq_count; i++) { - err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, + err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i, enic->config.wq_desc_count, sizeof(struct wq_enet_desc)); if (err) @@ -330,7 +364,7 @@ int enic_alloc_vnic_resources(struct enic *enic) } for (i = 0; i < enic->rq_count; i++) { - err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i, + err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i, enic->config.rq_desc_count, sizeof(struct rq_enet_desc)); if (err) @@ -340,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic) for (i = 0; i < enic->cq_count; i++) { if (i < enic->rq_count) err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, - enic->config.rq_desc_count, - sizeof(struct cq_enet_rq_desc)); + enic->config.rq_desc_count, + rq_cq_desc_size); else err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, enic->config.wq_desc_count, @@ -372,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic) err_out_cleanup: enic_free_vnic_resources(enic); - +err_out: return err; } + +/* + * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support + * that command + */ +void enic_ext_cq(struct enic *enic) +{ + u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0; + int wait = 1000; + int ret; + + spin_lock_bh(&enic->devcmd_lock); + ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait); + if (ret || a0) { + dev_info(&enic->pdev->dev, + "CMD_CQ_ENTRY_SIZE_SET not supported."); + enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16; + goto out; + } + a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT; + enic->ext_cq = fls(a1) - 1; + a0 = VNIC_RQ_ALL; + a1 = enic->ext_cq; + ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait); + if (ret) { + dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed."); + enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16; + } +out: + spin_unlock_bh(&enic->devcmd_lock); + dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes", + 16 << enic->ext_cq); +} diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h index b8ee42d297aaf..02dca1ae4a224 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.h +++ b/drivers/net/ethernet/cisco/enic/enic_res.h @@ -12,10 +12,13 @@ #include "vnic_wq.h" #include "vnic_rq.h" -#define ENIC_MIN_WQ_DESCS 64 -#define ENIC_MAX_WQ_DESCS 4096 -#define ENIC_MIN_RQ_DESCS 64 -#define ENIC_MAX_RQ_DESCS 4096 +#define ENIC_MIN_WQ_DESCS 64 +#define ENIC_MAX_WQ_DESCS_DEFAULT 4096 +#define ENIC_MAX_WQ_DESCS 16384 +#define ENIC_MIN_RQ_DESCS 64 +#define ENIC_MAX_RQ_DESCS 16384 +#define ENIC_MAX_RQ_DESCS_DEFAULT 4096 +#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024) #define ENIC_MIN_MTU 
ETH_MIN_MTU #define ENIC_MAX_MTU 9000 diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c new file mode 100644 index 0000000000000..ccbf5c9a21d0f --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_rq.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2024 Cisco Systems, Inc. All rights reserved. + +#include +#include +#include +#include "enic.h" +#include "enic_res.h" +#include "enic_rq.h" +#include "vnic_rq.h" +#include "cq_enet_desc.h" + +#define ENIC_LARGE_PKT_THRESHOLD 1000 + +static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, + u32 pkt_len) +{ + if (pkt_len > ENIC_LARGE_PKT_THRESHOLD) + pkt_size->large_pkt_bytes_cnt += pkt_len; + else + pkt_size->small_pkt_bytes_cnt += pkt_len; +} + +static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type, + u8 *color, u16 *q_number, u16 *completed_index) +{ + /* type_color is the last field for all cq structs */ + u8 type_color; + + switch (cq_desc_size) { + case VNIC_RQ_CQ_ENTRY_SIZE_16: { + struct cq_enet_rq_desc *desc = + (struct cq_enet_rq_desc *)cq_desc; + type_color = desc->type_color; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + rmb(); + + *q_number = le16_to_cpu(desc->q_number_rss_type_flags) & + CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index_flags) & + CQ_DESC_COMP_NDX_MASK; + break; + } + case VNIC_RQ_CQ_ENTRY_SIZE_32: { + struct cq_enet_rq_desc_32 *desc = + (struct cq_enet_rq_desc_32 *)cq_desc; + type_color = desc->type_color; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + rmb(); + + *q_number = le16_to_cpu(desc->q_number_rss_type_flags) & + CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index_flags) & + CQ_DESC_COMP_NDX_MASK; + *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) << + CQ_DESC_COMP_NDX_BITS; + break; + } + case VNIC_RQ_CQ_ENTRY_SIZE_64: { + struct cq_enet_rq_desc_64 *desc = + (struct cq_enet_rq_desc_64 *)cq_desc; + type_color = desc->type_color; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. 
+ */ + rmb(); + + *q_number = le16_to_cpu(desc->q_number_rss_type_flags) & + CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index_flags) & + CQ_DESC_COMP_NDX_MASK; + *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) << + CQ_DESC_COMP_NDX_BITS; + break; + } + } + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + *type = type_color & CQ_DESC_TYPE_MASK; +} + +static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash, + u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok, + u8 vlan_stripped, u8 csum_not_calc, + u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok, + u16 vlan_tci, struct sk_buff *skb) +{ + struct enic *enic = vnic_dev_priv(vrq->vdev); + struct net_device *netdev = enic->netdev; + struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats; + bool outer_csum_ok = true, encap = false; + + if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) { + switch (rss_type) { + case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4: + case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: + case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX: + skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4); + rqstats->l4_rss_hash++; + break; + case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4: + case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6: + case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX: + skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3); + rqstats->l3_rss_hash++; + break; + } + } + if (enic->vxlan.vxlan_udp_port_number) { + switch (enic->vxlan.patch_level) { + case 0: + if (fcoe) { + encap = true; + outer_csum_ok = fcoe_fc_crc_ok; + } + break; + case 2: + if (type == 7 && (rss_hash & BIT(0))) { + encap = true; + outer_csum_ok = (rss_hash & BIT(1)) && + (rss_hash & BIT(2)); + } + break; + } + } + + /* Hardware does not provide the whole packet checksum, only + * a pseudo checksum: hw validates the packet checksum but does + * not give us the checksum value, so use CHECKSUM_UNNECESSARY. + * + * For an encap pkt, tcp_udp_csum_ok is the inner csum_ok; + * outer_csum_ok is set by hw when the outer udp csum is + * correct or is zero. + */ + if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && + tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = encap; + if (encap) + rqstats->csum_unnecessary_encap++; + else + rqstats->csum_unnecessary++; + } + + if (vlan_stripped) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + rqstats->vlan_stripped++; + } +} + +/* + * The cq_enet_rq_desc decode below uses only the first 15 bytes of the + * cq entry, which are identical for all cq entry sizes (16, 32 and 64 byte). + */ +static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port, + u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, + u8 *csum_not_calc, u32 *rss_hash, + u16 *bytes_written, u8 *packet_error, + u8 *vlan_stripped, u16 *vlan_tci, + u16 *checksum, u8 *fcoe_sof, + u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, + u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp, + u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4, + u8 *ipv4_fragment, u8 *fcs_ok) +{ + u16 completed_index_flags; + u16 q_number_rss_type_flags; + u16 bytes_written_flags; + + completed_index_flags = le16_to_cpu(desc->completed_index_flags); + q_number_rss_type_flags = + le16_to_cpu(desc->q_number_rss_type_flags); + bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + + *ingress_port = (completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; + *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0; + *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? + 1 : 0; + *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? + 1 : 0; + + *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & + CQ_ENET_RQ_DESC_RSS_TYPE_MASK); + *csum_not_calc = (q_number_rss_type_flags & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; + + *rss_hash = le32_to_cpu(desc->rss_hash); + + *bytes_written = bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_error = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; + *vlan_stripped = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; + + /* + * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) + */ + *vlan_tci = le16_to_cpu(desc->vlan); + + if (*fcoe) { + *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & + CQ_ENET_RQ_DESC_FCOE_SOF_MASK); + *fcoe_fc_crc_ok = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; + *fcoe_enc_error = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; + *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & + CQ_ENET_RQ_DESC_FCOE_EOF_MASK); + *checksum = 0; + } else { + *fcoe_sof = 0; + *fcoe_fc_crc_ok = 0; + *fcoe_enc_error = 0; + *fcoe_eof = 0; + *checksum = le16_to_cpu(desc->checksum_fcoe); + } + + *tcp_udp_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; + *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; + *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; + *ipv4_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; + *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; + *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; + *ipv4_fragment = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; + *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 
1 : 0; +} + +static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok, + u16 bytes_written) +{ + struct enic *enic = vnic_dev_priv(vrq->vdev); + struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats; + + if (packet_error) { + if (!fcs_ok) { + if (bytes_written > 0) + rqstats->bad_fcs++; + else if (bytes_written == 0) + rqstats->pkt_truncated++; + } + return true; + } + return false; +} + +int enic_rq_alloc_buf(struct vnic_rq *rq) +{ + struct enic *enic = vnic_dev_priv(rq->vdev); + struct net_device *netdev = enic->netdev; + struct enic_rq *erq = &enic->rq[rq->index]; + struct enic_rq_stats *rqstats = &erq->stats; + unsigned int offset = 0; + unsigned int len = netdev->mtu + VLAN_ETH_HLEN; + unsigned int os_buf_index = 0; + dma_addr_t dma_addr; + struct vnic_rq_buf *buf = rq->to_use; + struct page *page; + unsigned int truesize = len; + + if (buf->os_buf) { + enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, + buf->len); + + return 0; + } + + page = page_pool_dev_alloc(erq->pool, &offset, &truesize); + if (unlikely(!page)) { + rqstats->pp_alloc_fail++; + return -ENOMEM; + } + buf->offset = offset; + buf->truesize = truesize; + dma_addr = page_pool_get_dma_addr(page) + offset; + enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len); + + return 0; +} + +void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) +{ + struct enic *enic = vnic_dev_priv(rq->vdev); + struct enic_rq *erq = &enic->rq[rq->index]; + + if (!buf->os_buf) + return; + + page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true); + buf->os_buf = NULL; +} + +static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq, + struct vnic_rq_buf *buf, void *cq_desc, + u8 type, u16 q_number, u16 completed_index) +{ + struct sk_buff *skb; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats; + struct napi_struct *napi; + + u8 eop, sop, ingress_port, vlan_stripped; + u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; + u8 packet_error; + u16 bytes_written, vlan_tci, checksum; + u32 rss_hash; + + rqstats->packets++; + + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port, + &fcoe, &eop, &sop, &rss_type, &csum_not_calc, + &rss_hash, &bytes_written, &packet_error, + &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof, + &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof, + &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, + &ipv4, &ipv4_fragment, &fcs_ok); + + if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written)) + return; + + if (eop && bytes_written > 0) { + /* Good receive + */ + rqstats->bytes += bytes_written; + napi = &enic->napi[rq->index]; + skb = napi_get_frags(napi); + if (unlikely(!skb)) { + net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n", + enic->netdev->name, rq->index, + completed_index); + rqstats->no_skb++; + return; + } + + prefetch(skb->data - NET_IP_ALIGN); + + dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, + bytes_written, DMA_FROM_DEVICE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + (struct page *)buf->os_buf, buf->offset, + bytes_written, buf->truesize); + skb_record_rx_queue(skb, q_number); + enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe, + fcoe_fc_crc_ok, vlan_stripped, + csum_not_calc, tcp_udp_csum_ok, ipv6, + ipv4_csum_ok, vlan_tci, skb); + skb_mark_for_recycle(skb); + 
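/* The skb obtained from napi_get_frags() is built from page + * fragments: the page attached above carries the packet data, and + * skb_mark_for_recycle() lets the stack return the page to its + * page_pool when the skb is freed. */ +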
napi_gro_frags(napi); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_intr_update_pkt_size(&cq->pkt_size_counter, + bytes_written); + buf->os_buf = NULL; + buf->dma_addr = 0; + buf = buf->next; + } else { + /* Buffer overflow + */ + rqstats->pkt_truncated++; + } +} + +static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type, + u16 q_number, u16 completed_index) +{ + struct enic_rq_stats *rqstats = &enic->rq[q_number].stats; + struct vnic_rq *vrq = &enic->rq[q_number].vrq; + struct vnic_rq_buf *vrq_buf = vrq->to_clean; + int skipped; + + while (1) { + skipped = (vrq_buf->index != completed_index); + if (!skipped) + enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type, + q_number, completed_index); + else + rqstats->desc_skip++; + + vrq->ring.desc_avail++; + vrq->to_clean = vrq_buf->next; + vrq_buf = vrq_buf->next; + if (!skipped) + break; + } +} + +unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do) +{ + struct vnic_cq *cq = &enic->cq[cq_index]; + void *cq_desc = vnic_cq_to_clean(cq); + u16 q_number, completed_index; + unsigned int work_done = 0; + u8 type, color; + + enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number, + &completed_index); + + while (color != cq->last_color) { + enic_rq_service(enic, cq_desc, type, q_number, completed_index); + vnic_cq_inc_to_clean(cq); + + if (++work_done >= work_to_do) + break; + + cq_desc = vnic_cq_to_clean(cq); + enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, + &q_number, &completed_index); + } + + return work_done; +} diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h new file mode 100644 index 0000000000000..98476a7297afb --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_rq.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright 2024 Cisco Systems, Inc. All rights reserved. + */ + +unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do); +int enic_rq_alloc_buf(struct vnic_rq *rq); +void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c new file mode 100644 index 0000000000000..07936f8b42317 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_wq.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2025 Cisco Systems, Inc. All rights reserved. + +#include +#include "enic_res.h" +#include "enic.h" +#include "enic_wq.h" + +#define ENET_CQ_DESC_COMP_NDX_BITS 14 +#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0) + +static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq, + u8 *type, u8 *color, u16 *q_number, + u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. 
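+ * + * The color bit encodes which lap of the ring an entry belongs to: + * vnic_cq_inc_to_clean() flips cq->last_color whenever to_clean + * wraps, so an entry whose color still equals cq->last_color has not + * been rewritten on the current lap and must not be consumed.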
+ */ + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + + if (ext_wq) + *completed_index = le16_to_cpu(desc->completed_index) & + ENET_CQ_DESC_COMP_NDX_MASK; + else + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + struct enic *enic = vnic_dev_priv(wq->vdev); + + if (buf->sop) + dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, + DMA_TO_DEVICE); + else + dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len, + DMA_TO_DEVICE); + + if (buf->os_buf) + dev_kfree_skb_any(buf->os_buf); +} + +static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc, + struct vnic_wq_buf *buf, void *opaque) +{ + struct enic *enic = vnic_dev_priv(wq->vdev); + + enic->wq[wq->index].stats.cq_work++; + enic->wq[wq->index].stats.cq_bytes += buf->len; + enic_free_wq_buf(wq, buf); +} + +static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index) +{ + struct enic *enic = vnic_dev_priv(vdev); + + spin_lock(&enic->wq[q_number].lock); + + vnic_wq_service(&enic->wq[q_number].vwq, cq_desc, + completed_index, enic_wq_free_buf, NULL); + + if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) + && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >= + (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) { + netif_wake_subqueue(enic->netdev, q_number); + enic->wq[q_number].stats.wake++; + } + + spin_unlock(&enic->wq[q_number].lock); +} + +unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do) +{ + struct vnic_cq *cq = &enic->cq[cq_index]; + u16 q_number, completed_index; + unsigned int work_done = 0; + struct cq_desc *cq_desc; + u8 type, color; + bool ext_wq; + + ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT; + + cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq); + enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + enic_wq_service(cq->vdev, cq_desc, type, q_number, + completed_index); + + vnic_cq_inc_to_clean(cq); + + if (++work_done >= work_to_do) + break; + + cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq); + enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color, + &q_number, &completed_index); + } + + return work_done; +} diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h new file mode 100644 index 0000000000000..12acb3f2fbc94 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_wq.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright 2025 Cisco Systems, Inc. All rights reserved. 
+ */ + +void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); +unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do); diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h index eed5bf59e5d2c..0e37f5d5e5272 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h @@ -56,45 +56,18 @@ struct vnic_cq { ktime_t prev_ts; }; -static inline unsigned int vnic_cq_service(struct vnic_cq *cq, - unsigned int work_to_do, - int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, - u8 type, u16 q_number, u16 completed_index, void *opaque), - void *opaque) +static inline void *vnic_cq_to_clean(struct vnic_cq *cq) { - struct cq_desc *cq_desc; - unsigned int work_done = 0; - u16 q_number, completed_index; - u8 type, color; - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); - cq_desc_dec(cq_desc, &type, &color, - &q_number, &completed_index); - - while (color != cq->last_color) { - - if ((*q_service)(cq->vdev, cq_desc, type, - q_number, completed_index, opaque)) - break; - - cq->to_clean++; - if (cq->to_clean == cq->ring.desc_count) { - cq->to_clean = 0; - cq->last_color = cq->last_color ? 0 : 1; - } - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); - cq_desc_dec(cq_desc, &type, &color, - &q_number, &completed_index); + return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean); +} - work_done++; - if (work_done >= work_to_do) - break; +static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq) +{ + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; } - - return work_done; } void vnic_cq_free(struct vnic_cq *cq); diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 12a83fa1302d7..9f6089e816081 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -146,23 +146,19 @@ EXPORT_SYMBOL(vnic_dev_get_res); static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) { - /* The base address of the desc rings must be 512 byte aligned. - * Descriptor count is aligned to groups of 32 descriptors. A - * count of 0 means the maximum 4096 descriptors. Descriptor - * size is aligned to 16 bytes. 
- */ - - unsigned int count_align = 32; - unsigned int desc_align = 16; - ring->base_align = 512; + /* Descriptor ring base address alignment in bytes*/ + ring->base_align = VNIC_DESC_BASE_ALIGN; + /* A count of 0 means the maximum descriptors */ if (desc_count == 0) - desc_count = 4096; + desc_count = VNIC_DESC_MAX_COUNT; - ring->desc_count = ALIGN(desc_count, count_align); + /* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */ + ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN); - ring->desc_size = ALIGN(desc_size, desc_align); + /* Descriptor size alignment in bytes */ + ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN); ring->size = ring->desc_count * ring->desc_size; ring->size_unaligned = ring->size + ring->base_align; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 6273794b923b3..7fdd8c661c99d 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -31,6 +31,11 @@ static inline void writeq(u64 val, void __iomem *reg) #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define VNIC_DESC_SIZE_ALIGN 16 +#define VNIC_DESC_COUNT_ALIGN 32 +#define VNIC_DESC_BASE_ALIGN 512 +#define VNIC_DESC_MAX_COUNT 4096 + enum vnic_dev_intr_mode { VNIC_DEV_INTR_MODE_UNKNOWN, VNIC_DEV_INTR_MODE_INTX, diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index db56d778877a7..605ef17f967e4 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -436,6 +436,25 @@ enum vnic_devcmd_cmd { * in: (u16) a2 = unsigned short int port information */ CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73), + + /* + * Set extended CQ field in MREGS of RQ (or all RQs) + * for given vNIC + * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs) + * (u32) a1 = CQ entry size + * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes + * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes + * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes + * + * Capability query: + * out: (u32) a0 = errno, 0:valid cmd + * (u32) a1 = value consisting of supported entries + * bit 0: 16 bytes + * bit 1: 32 bytes + * bit 2: 64 bytes + */ + CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90), + }; /* CMD_ENABLE2 flags */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h index 5acc236069dea..9e8e86262a3fe 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_enet.h +++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h @@ -21,6 +21,11 @@ struct vnic_enet_config { u16 loop_tag; u16 vf_rq_count; u16 num_arfs; + u8 reserved[66]; + u32 max_rq_ring; // MAX RQ ring size + u32 max_wq_ring; // MAX WQ ring size + u32 max_cq_ring; // MAX CQ ring size + u32 rdma_rsvd_lkey; // Reserved (privileged) LKey }; #define VENETF_TSO 0x1 /* TSO enabled */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index 0bc595abc03b5..a1cdd729caece 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -50,7 +50,7 @@ struct vnic_rq_ctrl { (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf)) #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries)) -#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) +#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384) struct vnic_rq_buf { struct vnic_rq_buf *next; @@ -61,6 +61,8 
@@ struct vnic_rq_buf { unsigned int index; void *desc; uint64_t wr_id; + unsigned int offset; + unsigned int truesize; }; enum enic_poll_state { diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c index 20fcb20b42ede..66b5778353389 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_vic.c +++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c @@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, tlv->type = htons(type); tlv->length = htons(length); - memcpy(tlv->value, value, length); + unsafe_memcpy(tlv->value, value, length, + /* Flexible array of flexible arrays */); vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1); vp->length = htonl(ntohl(vp->length) + diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 75c5269110744..3bb4758100ba4 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -62,7 +62,7 @@ struct vnic_wq_buf { (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) -#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) +#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384) struct vnic_wq { unsigned int index; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 759ff42a087d7..1837db6aad73a 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -351,7 +351,7 @@ config ICE_SWITCHDEV config ICE_HWTS bool "Support HW cross-timestamp on platforms with PTM support" default y - depends on ICE && X86 + depends on ICE && X86 && PCIE_PTM help Say Y to enable hardware supported cross-timestamping on platforms with PCIe PTM support. The cross-timestamp is available through diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index eb2e0828110a6..bbeb93d84bc92 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3137,10 +3137,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) const u8 *addr = al->list[i].addr; /* Allow to delete VF primary MAC only if it was not set - * administratively by PF or if VF is trusted. + * administratively by PF. 
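+ * Trusted VFs no longer get an exception here; only the + * pf_set_mac flag decides.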
*/ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { - if (i40e_can_vf_change_mac(vf)) + if (!vf->pf_set_mac) was_unimac_deleted = true; else continue; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index afa837e467579..339df8f28310d 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -1047,10 +1047,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } -static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw) -{ - return hw->ptp.phy_model; -} - extern const struct xdp_metadata_ops ice_xdp_md_ops; #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 01a08cfd0090a..10285995c9edd 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only // SPDX-FileCopyrightText: Copyright Red Hat -#include #include #include #include @@ -14,32 +13,45 @@ static DEFINE_XARRAY(ice_adapters); static DEFINE_MUTEX(ice_adapters_mutex); -/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */ -#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) -#define INDEX_FIELD_DEV GENMASK(31, 16) -#define INDEX_FIELD_BUS GENMASK(12, 5) -#define INDEX_FIELD_SLOT GENMASK(4, 0) +#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63) -static unsigned long ice_adapter_index(const struct pci_dev *pdev) -{ - unsigned int domain = pci_domain_nr(pdev->bus); - - WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN)); +#define ICE_ADAPTER_INDEX_E825C \ + (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX) +static u64 ice_adapter_index(struct pci_dev *pdev) +{ switch (pdev->device) { case ICE_DEV_ID_E825C_BACKPLANE: case ICE_DEV_ID_E825C_QSFP: case ICE_DEV_ID_E825C_SFP: case ICE_DEV_ID_E825C_SGMII: - return FIELD_PREP(INDEX_FIELD_DEV, pdev->device); + /* E825C devices have multiple NACs which are connected to the + * same clock source, and which must share the same + * ice_adapter structure. We can't use the serial number since + * each NAC has its own NVM generated with its own unique + * Device Serial Number. Instead, rely on the embedded nature + * of the E825C devices, and use a fixed index. This relies on + * the fact that all E825C physical functions in a given + * system are part of the same overall device. + */ + return ICE_ADAPTER_INDEX_E825C; default: - return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | - FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | - FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); + return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX; } } -static struct ice_adapter *ice_adapter_new(void) +static unsigned long ice_adapter_xa_index(struct pci_dev *pdev) +{ + u64 index = ice_adapter_index(pdev); + +#if BITS_PER_LONG == 64 + return index; +#else + return (u32)index ^ (u32)(index >> 32); +#endif +} + +static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev) { struct ice_adapter *adapter; @@ -47,6 +59,7 @@ static struct ice_adapter *ice_adapter_new(void) if (!adapter) return NULL; + adapter->index = ice_adapter_index(pdev); spin_lock_init(&adapter->ptp_gltsyn_time_lock); refcount_set(&adapter->refcount, 1); @@ -77,23 +90,25 @@ static void ice_adapter_free(struct ice_adapter *adapter) * Return: Pointer to ice_adapter on success. * ERR_PTR() on error. -ENOMEM is the only possible error. 
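+ * + * On 32-bit kernels ice_adapter_xa_index() folds the 64-bit index + * into an unsigned long by XOR-ing its two halves, so distinct + * adapters can in principle collide; the index cached in struct + * ice_adapter is checked on lookup to catch that case.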
*/ -struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) { - unsigned long index = ice_adapter_index(pdev); struct ice_adapter *adapter; + unsigned long index; int err; + index = ice_adapter_xa_index(pdev); scoped_guard(mutex, &ice_adapters_mutex) { err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); if (err == -EBUSY) { adapter = xa_load(&ice_adapters, index); refcount_inc(&adapter->refcount); + WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev)); return adapter; } if (err) return ERR_PTR(err); - adapter = ice_adapter_new(); + adapter = ice_adapter_new(pdev); if (!adapter) return ERR_PTR(-ENOMEM); xa_store(&ice_adapters, index, adapter, GFP_KERNEL); @@ -110,11 +125,12 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) * * Context: Process, may sleep. */ -void ice_adapter_put(const struct pci_dev *pdev) +void ice_adapter_put(struct pci_dev *pdev) { - unsigned long index = ice_adapter_index(pdev); struct ice_adapter *adapter; + unsigned long index; + index = ice_adapter_xa_index(pdev); scoped_guard(mutex, &ice_adapters_mutex) { adapter = xa_load(&ice_adapters, index); if (WARN_ON(!adapter)) diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h index a91978187d5be..3802944467cc5 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.h +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -32,6 +32,7 @@ struct ice_port_list { * @refcount: Reference count. struct ice_pf objects hold the references. * @ctrl_pf: Control PF of the adapter * @ports: Ports list + * @index: 64-bit index cached for collision detection on 32bit systems */ struct ice_adapter { refcount_t refcount; @@ -40,9 +41,10 @@ struct ice_adapter { struct ice_pf *ctrl_pf; struct ice_port_list ports; + u64 index; }; -struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev); -void ice_adapter_put(const struct pci_dev *pdev); +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev); +void ice_adapter_put(struct pci_dev *pdev); #endif /* _ICE_ADAPTER_H */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1f01f3501d6bf..abe4316f60133 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1648,6 +1648,7 @@ struct ice_aqc_get_port_options_elem { #define ICE_AQC_PORT_OPT_MAX_LANE_25G 5 #define ICE_AQC_PORT_OPT_MAX_LANE_50G 6 #define ICE_AQC_PORT_OPT_MAX_LANE_100G 7 +#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8 u8 global_scid[2]; u8 phy_scid[2]; @@ -2247,6 +2248,8 @@ struct ice_aqc_get_pkg_info_resp { struct ice_aqc_get_pkg_info pkg_info[]; }; +#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0) + /* Get CGU abilities command response data structure (indirect 0x0C61) */ struct ice_aqc_get_cgu_abilities { u8 num_inputs; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 9a2e6b26d768e..00dd9339651e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -185,7 +185,7 @@ static int ice_set_mac_type(struct ice_hw *hw) * ice_is_generic_mac - check if device's mac_type is generic * @hw: pointer to the hardware structure * - * Return: true if mac_type is generic (with SBQ support), false if not + * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise. 
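+ * + * Device-identity checks elsewhere likewise test hw->mac_type (e.g. + * ICE_MAC_GENERIC_3K_E825 for E825C parts) now that the per-device + * ice_is_e8xx() helpers are removed.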
*/ bool ice_is_generic_mac(struct ice_hw *hw) { @@ -193,120 +193,6 @@ bool ice_is_generic_mac(struct ice_hw *hw) hw->mac_type == ICE_MAC_GENERIC_3K_E825); } -/** - * ice_is_e810 - * @hw: pointer to the hardware structure - * - * returns true if the device is E810 based, false if not. - */ -bool ice_is_e810(struct ice_hw *hw) -{ - return hw->mac_type == ICE_MAC_E810; -} - -/** - * ice_is_e810t - * @hw: pointer to the hardware structure - * - * returns true if the device is E810T based, false if not. - */ -bool ice_is_e810t(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E810C_SFP: - switch (hw->subsystem_device_id) { - case ICE_SUBDEV_ID_E810T: - case ICE_SUBDEV_ID_E810T2: - case ICE_SUBDEV_ID_E810T3: - case ICE_SUBDEV_ID_E810T4: - case ICE_SUBDEV_ID_E810T6: - case ICE_SUBDEV_ID_E810T7: - return true; - } - break; - case ICE_DEV_ID_E810C_QSFP: - switch (hw->subsystem_device_id) { - case ICE_SUBDEV_ID_E810T2: - case ICE_SUBDEV_ID_E810T3: - case ICE_SUBDEV_ID_E810T5: - return true; - } - break; - default: - break; - } - - return false; -} - -/** - * ice_is_e822 - Check if a device is E822 family device - * @hw: pointer to the hardware structure - * - * Return: true if the device is E822 based, false if not. - */ -bool ice_is_e822(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E822C_BACKPLANE: - case ICE_DEV_ID_E822C_QSFP: - case ICE_DEV_ID_E822C_SFP: - case ICE_DEV_ID_E822C_10G_BASE_T: - case ICE_DEV_ID_E822C_SGMII: - case ICE_DEV_ID_E822L_BACKPLANE: - case ICE_DEV_ID_E822L_SFP: - case ICE_DEV_ID_E822L_10G_BASE_T: - case ICE_DEV_ID_E822L_SGMII: - return true; - default: - return false; - } -} - -/** - * ice_is_e823 - * @hw: pointer to the hardware structure - * - * returns true if the device is E823-L or E823-C based, false if not. - */ -bool ice_is_e823(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E823L_BACKPLANE: - case ICE_DEV_ID_E823L_SFP: - case ICE_DEV_ID_E823L_10G_BASE_T: - case ICE_DEV_ID_E823L_1GBE: - case ICE_DEV_ID_E823L_QSFP: - case ICE_DEV_ID_E823C_BACKPLANE: - case ICE_DEV_ID_E823C_QSFP: - case ICE_DEV_ID_E823C_SFP: - case ICE_DEV_ID_E823C_10G_BASE_T: - case ICE_DEV_ID_E823C_SGMII: - return true; - default: - return false; - } -} - -/** - * ice_is_e825c - Check if a device is E825C family device - * @hw: pointer to the hardware structure - * - * Return: true if the device is E825-C based, false if not. - */ -bool ice_is_e825c(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E825C_BACKPLANE: - case ICE_DEV_ID_E825C_QSFP: - case ICE_DEV_ID_E825C_SFP: - case ICE_DEV_ID_E825C_SGMII: - return true; - default: - return false; - } -} - /** * ice_is_pf_c827 - check if pf contains c827 phy * @hw: pointer to the hw struct @@ -2388,7 +2274,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - if (!ice_is_e825c(hw)) { + if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) { info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); } else { @@ -4117,6 +4003,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } +/** + * ice_get_phy_lane_number - Get PHY lane number for current adapter + * @hw: pointer to the hw struct + * + * Return: PHY lane number on success, negative error code otherwise. 
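+ * + * Lanes are scanned in order; a lane with no valid active port option + * or no reported speed is treated as unoccupied, and the lane whose + * running count of occupied lanes equals hw->pf_id is returned.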
+ */ +int ice_get_phy_lane_number(struct ice_hw *hw) +{ + struct ice_aqc_get_port_options_elem *options; + unsigned int lport = 0; + unsigned int lane; + int err; + + options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL); + if (!options) + return -ENOMEM; + + for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) { + u8 options_count = ICE_AQC_PORT_OPT_MAX; + u8 speed, active_idx, pending_idx; + bool active_valid, pending_valid; + + err = ice_aq_get_port_options(hw, options, &options_count, lane, + true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (err) + goto err; + + if (!active_valid) + continue; + + speed = options[active_idx].max_lane_speed; + /* If we don't get speed for this lane, it's unoccupied */ + if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G) + continue; + + if (hw->pf_id == lport) { + kfree(options); + return lane; + } + + lport++; + } + + /* PHY lane not found */ + err = -ENXIO; +err: + kfree(options); + return err; +} + /** * ice_aq_sff_eeprom * @hw: pointer to the HW struct @@ -5871,6 +5808,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } +/** + * ice_get_pca9575_handle - find and return the PCA9575 controller + * @hw: pointer to the hw struct + * @pca9575_handle: GPIO controller's handle + * + * Find and return the GPIO controller's handle in the netlist. + * When found - the value will be cached in the hw structure and following calls + * will return cached value. + * + * Return: 0 on success, -ENXIO when there's no PCA9575 present. + */ +int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + int err; + u8 idx; + + /* If handle was read previously return cached value */ + if (hw->io_expander_handle) { + *pca9575_handle = hw->io_expander_handle; + return 0; + } + +#define SW_PCA9575_SFP_TOPO_IDX 2 +#define SW_PCA9575_QSFP_TOPO_IDX 1 + + /* Check if the SW IO expander controlling SMA exists in the netlist. */ + if (hw->device_id == ICE_DEV_ID_E810C_SFP) + idx = SW_PCA9575_SFP_TOPO_IDX; + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) + idx = SW_PCA9575_QSFP_TOPO_IDX; + else + return -ENXIO; + + /* If handle was not detected read it from the netlist */ + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + cmd = &desc.params.get_link_topo; + cmd->addr.topo_params.node_type_ctx = + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL; + cmd->addr.topo_params.index = idx; + + err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (err) + return -ENXIO; + + /* Verify if we found the right IO expander type */ + if (desc.params.get_link_topo.node_part_num != + ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) + return -ENXIO; + + /* If present save the handle and return it */ + hw->io_expander_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + *pca9575_handle = hw->io_expander_handle; + + return 0; +} + +/** + * ice_read_pca9575_reg - read the register from the PCA9575 controller + * @hw: pointer to the hw struct + * @offset: GPIO controller register offset + * @data: pointer to data to be read from the GPIO controller + * + * Return: 0 on success, negative error code otherwise. 
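+ * + * The read goes through the firmware I2C interface: the PCA9575 + * netlist handle is resolved (and cached) via ice_get_pca9575_handle() + * first, then a one-byte ice_aq_read_i2c() is issued at @offset.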
+ */ +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data) +{ + struct ice_aqc_link_topo_addr link_topo; + __le16 addr; + u16 handle; + int err; + + memset(&link_topo, 0, sizeof(link_topo)); + + err = ice_get_pca9575_handle(hw, &handle); + if (err) + return err; + + link_topo.handle = cpu_to_le16(handle); + link_topo.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, + ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); + + addr = cpu_to_le16((u16)offset); + + return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); +} + /** * ice_aq_set_gpio * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 3b739a7fe498d..eae8711acd46e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -132,7 +132,6 @@ int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); bool ice_is_generic_mac(struct ice_hw *hw); -bool ice_is_e810(struct ice_hw *hw); int ice_clear_pf_cfg(struct ice_hw *hw); int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, @@ -192,6 +191,7 @@ ice_aq_get_port_options(struct ice_hw *hw, int ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, u8 new_option); +int ice_get_phy_lane_number(struct ice_hw *hw); int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, @@ -274,10 +274,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); -bool ice_is_e810t(struct ice_hw *hw); -bool ice_is_e822(struct ice_hw *hw); -bool ice_is_e823(struct ice_hw *hw); -bool ice_is_e825c(struct ice_hw *hw); int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); @@ -304,5 +300,7 @@ int ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, const u8 *data, struct ice_sq_cd *cd); +int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle); +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 4bcd002940bfd..52a1bdf1d5386 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2312,14 +2312,14 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; - if (ice_is_e825c(hw)) + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; } - if (!ice_is_e825c(hw)) + if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index d5ad6d84007c2..38e151c7ea236 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -2064,6 +2064,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf) return 0; } +/** + * ice_dpll_phase_range_set - initialize phase adjust range helper + * @range: pointer to 
phase adjust range struct to be initialized + * @phase_adj: a value to be used as min(-)/max(+) boundary + */ +static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range, + u32 phase_adj) +{ + range->min = -phase_adj; + range->max = phase_adj; +} + /** * ice_dpll_init_info_pins_generic - initializes generic pins info * @pf: board private structure @@ -2105,8 +2117,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input) for (i = 0; i < pin_num; i++) { pins[i].idx = i; pins[i].prop.board_label = labels[i]; - pins[i].prop.phase_range.min = phase_adj_max; - pins[i].prop.phase_range.max = -phase_adj_max; + ice_dpll_phase_range_set(&pins[i].prop.phase_range, + phase_adj_max); pins[i].prop.capabilities = cap; pins[i].pf = pf; ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); @@ -2152,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, struct ice_hw *hw = &pf->hw; struct ice_dpll_pin *pins; unsigned long caps; + u32 phase_adj_max; u8 freq_supp_num; bool input; @@ -2159,11 +2172,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, case ICE_DPLL_PIN_TYPE_INPUT: pins = pf->dplls.inputs; num_pins = pf->dplls.num_inputs; + phase_adj_max = pf->dplls.input_phase_adj_max; input = true; break; case ICE_DPLL_PIN_TYPE_OUTPUT: pins = pf->dplls.outputs; num_pins = pf->dplls.num_outputs; + phase_adj_max = pf->dplls.output_phase_adj_max; input = false; break; default: @@ -2188,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, return ret; caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE); - pins[i].prop.phase_range.min = - pf->dplls.input_phase_adj_max; - pins[i].prop.phase_range.max = - -pf->dplls.input_phase_adj_max; } else { - pins[i].prop.phase_range.min = - pf->dplls.output_phase_adj_max; - pins[i].prop.phase_range.max = - -pf->dplls.output_phase_adj_max; ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps); if (ret) return ret; } + ice_dpll_phase_range_set(&pins[i].prop.phase_range, + phase_adj_max); pins[i].prop.capabilities = caps; ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); if (ret) @@ -2308,8 +2317,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu) dp->dpll_idx = abilities.pps_dpll_idx; d->num_inputs = abilities.num_inputs; d->num_outputs = abilities.num_outputs; - d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj); - d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj); + d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) & + ICE_AQC_GET_CGU_MAX_PHASE_ADJ; + d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) & + ICE_AQC_GET_CGU_MAX_PHASE_ADJ; alloc_size = sizeof(*d->inputs) * d->num_inputs; d->inputs = kzalloc(alloc_size, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index f02e8ca553750..68b8d453d5df7 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf) } /** - * ice_gnss_is_gps_present - Check if GPS HW is present + * ice_gnss_is_module_present - Check if GNSS HW is present * @hw: pointer to HW struct + * + * Return: true when GNSS is present, false otherwise. 
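+ * + * Presence is read from the PCA9575 P0 input port: the active-low + * ICE_P0_GNSS_PRSNT_N bit must read zero for the module to be + * reported present.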
*/ -bool ice_gnss_is_gps_present(struct ice_hw *hw) +bool ice_gnss_is_module_present(struct ice_hw *hw) { - if (!hw->func_caps.ts_func_info.src_tmr_owned) - return false; + int err; + u8 data; - if (!ice_is_gps_in_netlist(hw)) + if (!hw->func_caps.ts_func_info.src_tmr_owned || + !ice_is_gps_in_netlist(hw)) return false; -#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) - if (ice_is_e810t(hw)) { - int err; - u8 data; - - err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data); - if (err || !!(data & ICE_P0_GNSS_PRSNT_N)) - return false; - } else { - return false; - } -#else - if (!ice_is_e810t(hw)) + err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data); + if (err || !!(data & ICE_P0_GNSS_PRSNT_N)) return false; -#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ return true; } diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index 75e567ad70594..15daf603ed7bf 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -37,11 +37,11 @@ struct gnss_serial { #if IS_ENABLED(CONFIG_GNSS) void ice_gnss_init(struct ice_pf *pf); void ice_gnss_exit(struct ice_pf *pf); -bool ice_gnss_is_gps_present(struct ice_hw *hw); +bool ice_gnss_is_module_present(struct ice_hw *hw); #else static inline void ice_gnss_init(struct ice_pf *pf) { } static inline void ice_gnss_exit(struct ice_pf *pf) { } -static inline bool ice_gnss_is_gps_present(struct ice_hw *hw) +static inline bool ice_gnss_is_module_present(struct ice_hw *hw) { return false; } diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 8d31bfe28cc88..b692be1cf7bfe 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -533,10 +533,22 @@ #define PFPM_WUS_MAG_M BIT(1) #define PFPM_WUS_MNG_M BIT(3) #define PFPM_WUS_FW_RST_WK_M BIT(31) +#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020 +#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000 #define E830_PRTMAC_CL01_PS_QNT 0x001E32A0 #define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0) #define E830_PRTMAC_CL01_QNT_THR 0x001E3320 #define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) +#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32)) +#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32)) +#define E830_GLPTM_ART_CTL 0x00088B50 +#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0) +#define E830_GLPTM_ART_TIME_H 0x00088B54 +#define E830_GLPTM_ART_TIME_L 0x00088B58 +#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4)) +#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4)) +#define E830_PFPTM_SEM 0x00088B00 +#define E830_PFPTM_SEM_BUSY_M BIT(0) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) #define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index a19e1044b3637..109f9d1e6e76c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3885,7 +3885,7 @@ void ice_init_feature_support(struct ice_pf *pf) ice_set_feature_support(pf, ICE_F_CGU); if (ice_is_clock_mux_in_netlist(&pf->hw)) ice_set_feature_support(pf, ICE_F_SMA_CTRL); - if (ice_gnss_is_gps_present(&pf->hw)) + if (ice_gnss_is_module_present(&pf->hw)) ice_set_feature_support(pf, ICE_F_GNSS); break; default: diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a6d1c53fb4445..a784014cd0f99 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1158,7 +1158,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return 0; - ice_ptp_link_change(pf, pf->hw.pf_id, link_up); + ice_ptp_link_change(pf, link_up); if (ice_is_dcb_active(pf)) { if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) @@ -3188,12 +3188,14 @@ static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data) hw = &pf->hw; tx = &pf->ptp.port.tx; spin_lock_irqsave(&tx->lock, flags); - ice_ptp_complete_tx_single_tstamp(tx); + if (tx->init) { + ice_ptp_complete_tx_single_tstamp(tx); - idx = find_next_bit_wrap(tx->in_use, tx->len, - tx->last_ll_ts_idx_read + 1); - if (idx != tx->len) - ice_ptp_req_tx_single_tstamp(tx, idx); + idx = find_next_bit_wrap(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + } spin_unlock_irqrestore(&tx->lock, flags); val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | @@ -3295,22 +3297,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (ice_pf_state_is_nominal(pf) && - pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { - struct ice_ptp_tx *tx = &pf->ptp.port.tx; - unsigned long flags; - u8 idx; - - spin_lock_irqsave(&tx->lock, flags); - idx = find_next_bit_wrap(tx->in_use, tx->len, - tx->last_ll_ts_idx_read + 1); - if (idx != tx->len) - ice_ptp_req_tx_single_tstamp(tx, idx); - spin_unlock_irqrestore(&tx->lock, flags); - } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { - set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); - ret = IRQ_WAKE_THREAD; - } + + ret = ice_ptp_ts_irq(pf); } if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@ -4057,8 +4045,7 @@ static void ice_set_pf_caps(struct ice_pf *pf) } clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); - if (func_caps->common_cap.ieee_1588 && - !(pf->hw.mac_type == ICE_MAC_E830)) + if (func_caps->common_cap.ieee_1588) set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); pf->max_pf_txqs = func_caps->common_cap.num_txq; @@ -5045,6 +5032,12 @@ static int ice_init(struct ice_pf *pf) if (err) return err; + if (pf->hw.mac_type == ICE_MAC_E830) { + err = pci_enable_ptm(pf->pdev, NULL); + if (err) + dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); + } + err = ice_alloc_vsis(pf); if (err) goto err_alloc_vsis; @@ -6734,7 +6727,7 @@ static int ice_up_complete(struct ice_vsi *vsi) ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); - ice_ptp_link_change(pf, pf->hw.pf_id, true); + ice_ptp_link_change(pf, true); } /* Perform an initial read of the statistics registers now to @@ -7204,7 +7197,7 @@ int ice_down(struct ice_vsi *vsi) if (vsi->netdev) { vlan_err = ice_vsi_del_vlan_zero(vsi); - ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); + ice_ptp_link_change(vsi->back, false); netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 8b210110454bf..d83931d7bff99 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = { }; static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = { - /* name, gpio */ - { TIME_SYNC, { 4, -1 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { TIME_SYNC, { 4, -1 
}, { 0, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 11 }}, }; static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = { - /* name, gpio */ - { SDP0, { 0, 0 }}, - { SDP1, { 1, 1 }}, - { SDP2, { 2, 2 }}, - { SDP3, { 3, 3 }}, - { TIME_SYNC, { 4, -1 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { SDP0, { 0, 0 }, { 15, 14 }}, + { SDP1, { 1, 1 }, { 15, 14 }}, + { SDP2, { 2, 2 }, { 15, 14 }}, + { SDP3, { 3, 3 }, { 15, 14 }}, + { TIME_SYNC, { 4, -1 }, { 11, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 9 }}, }; static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = { - /* name, gpio */ - { SDP0, { 0, 0 }}, - { SDP1, { 1, 1 }}, - { SDP2, { 2, 2 }}, - { SDP3, { 3, 3 }}, - { ONE_PPS, { -1, 5 }}, + /* name, gpio, delay */ + { SDP0, { 0, 0 }, { 0, 1 }}, + { SDP1, { 1, 1 }, { 0, 1 }}, + { SDP2, { 2, 2 }, { 0, 1 }}, + { SDP3, { 3, 3 }, { 0, 1 }}, + { ONE_PPS, { -1, 5 }, { 0, 1 }}, }; static const char ice_pin_names_nvm[][64] = { @@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = { }; static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = { - /* name, gpio */ - { GNSS, { 1, -1 }}, - { SMA1, { 1, 0 }}, - { UFL1, { -1, 0 }}, - { SMA2, { 3, 2 }}, - { UFL2, { 3, -1 }}, + /* name, gpio, delay */ + { GNSS, { 1, -1 }, { 0, 0 }}, + { SMA1, { 1, 0 }, { 0, 1 }}, + { UFL1, { -1, 0 }, { 0, 1 }}, + { SMA2, { 3, 2 }, { 0, 1 }}, + { UFL2, { 3, -1 }, { 0, 0 }}, }; static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf) @@ -310,6 +310,15 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) /* Read the system timestamp pre PHC read */ ptp_read_system_prets(sts); + if (hw->mac_type == ICE_MAC_E830) { + u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx)); + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + return clk_time; + } + lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); /* Read the system timestamp post PHC read */ @@ -971,28 +980,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) tx->len = 0; } -/** - * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps - * @pf: Board private structure - * @tx: the Tx tracking structure to initialize - * @port: the port this structure tracks - * - * Initialize the Tx timestamp tracker for this port. ETH56G PHYs - * have independent memory blocks for all ports. - * - * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker - */ -static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx, - u8 port) -{ - tx->block = port; - tx->offset = 0; - tx->len = INDEX_PER_PORT_ETH56G; - tx->has_ready_bitmap = 1; - - return ice_ptp_alloc_tx_tracker(tx); -} - /** * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps * @pf: Board private structure @@ -1003,9 +990,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx, * the timestamp block is shared for all ports in the same quad. To avoid * ports using the same timestamp index, logically break the block of * registers into chunks based on the port number. 
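+ * Each port then owns INDEX_PER_PORT_E82X consecutive indices at + * offset (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X into the + * quad's shared block.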
+ * + * Return: 0 on success, -ENOMEM when out of memory */ -static int -ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, + u8 port) { tx->block = ICE_GET_QUAD_NUM(port); tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; @@ -1016,24 +1005,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) } /** - * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps + * ice_ptp_init_tx - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize + * @port: the port this structure tracks + * + * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X, + * each port has its own block of timestamps, independent of the other ports. * - * Initialize the Tx timestamp tracker for this PF. For E810 devices, each - * port has its own block of timestamps, independent of the other ports. + * Return: 0 on success, -ENOMEM when out of memory */ -static int -ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) +static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { - tx->block = pf->hw.port_info->lport; + tx->block = port; tx->offset = 0; - tx->len = INDEX_PER_PORT_E810; + tx->len = INDEX_PER_PORT; + /* The E810 PHY does not provide a timestamp ready bitmap. Instead, * verify new timestamps against cached copy of the last read * timestamp. */ - tx->has_ready_bitmap = 0; + tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810; return ice_ptp_alloc_tx_tracker(tx); } @@ -1318,20 +1310,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) struct ice_hw *hw = &pf->hw; int err; - if (ice_is_e810(hw)) - return 0; - mutex_lock(&ptp_port->ps_lock); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_stop_phy_timer_eth56g(hw, port, true); + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + err = 0; break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: kthread_cancel_delayed_work_sync(&ptp_port->ov_work); err = ice_stop_phy_timer_e82x(hw, port, true); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_stop_phy_timer_eth56g(hw, port, true); + break; default: err = -ENODEV; } @@ -1361,19 +1354,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) unsigned long flags; int err; - if (ice_is_e810(hw)) - return 0; - if (!ptp_port->link_up) return ice_ptp_port_phy_stop(ptp_port); mutex_lock(&ptp_port->ps_lock); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_start_phy_timer_eth56g(hw, port); + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + err = 0; break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: /* Start the PHY timer in Vernier mode */ kthread_cancel_delayed_work_sync(&ptp_port->ov_work); @@ -1398,6 +1389,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_start_phy_timer_eth56g(hw, port); + break; default: err = -ENODEV; } @@ -1414,10 +1408,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) /** * ice_ptp_link_change - Reconfigure PTP after link status change * @pf: Board private structure - * @port: Port for which the PHY start is set * @linkup: Link is up or down */ -void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +void ice_ptp_link_change(struct ice_pf *pf, bool linkup) { struct ice_ptp_port *ptp_port; struct ice_hw *hw = &pf->hw; @@ 
-1425,14 +1418,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) if (pf->ptp.state != ICE_PTP_READY) return; - if (WARN_ON_ONCE(port >= hw->ptp.num_lports)) - return; - ptp_port = &pf->ptp.port; - if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) - port *= 2; - if (WARN_ON_ONCE(ptp_port->port_num != port)) - return; /* Update cached link status for this port immediately */ ptp_port->link_up = linkup; @@ -1440,12 +1426,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) /* Skip HW writes if reset is in progress */ if (pf->hw.reset_ongoing) return; - switch (ice_get_phy_model(hw)) { - case ICE_PHY_E810: - /* Do not reconfigure E810 PHY */ + + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + /* Do not reconfigure E810 or E830 PHY */ return; - case ICE_PHY_ETH56G: - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: ice_ptp_port_phy_restart(ptp_port); return; default: @@ -1473,46 +1461,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) ice_ptp_reset_ts_memory(hw); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: { - int port; + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + return 0; + case ICE_MAC_GENERIC: { + int quad; - for (port = 0; port < hw->ptp.num_lports; port++) { + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); + quad++) { int err; - err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); + err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); if (err) { - dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", - port, err); + dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", + quad, err); return err; } } return 0; } - case ICE_PHY_E82X: { - int quad; + case ICE_MAC_GENERIC_3K_E825: { + int port; - for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); - quad++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; - err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); + err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); if (err) { - dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", - quad, err); + dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", + port, err); return err; } } return 0; } - case ICE_PHY_E810: - return 0; - case ICE_PHY_UNSUP: + case ICE_MAC_UNKNOWN: default: - dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__, - ice_get_phy_model(hw)); return -EOPNOTSUPP; } } @@ -1592,18 +1579,29 @@ void ice_ptp_extts_event(struct ice_pf *pf) * Event is defined in GLTSYN_EVNT_0 register */ for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { + int pin_desc_idx; + /* Check if channel is enabled */ - if (pf->ptp.ext_ts_irq & (1 << chan)) { - lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); - hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); - event.timestamp = (((u64)hi) << 32) | lo; - event.type = PTP_CLOCK_EXTTS; - event.index = chan; - - /* Fire event */ - ptp_clock_event(pf->ptp.clock, &event); - pf->ptp.ext_ts_irq &= ~(1 << chan); + if (!(pf->ptp.ext_ts_irq & (1 << chan))) + continue; + + lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); + hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); + event.timestamp = (u64)hi << 32 | lo; + + /* Add delay compensation */ + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); + if (pin_desc_idx >= 0) { + const struct ice_ptp_pin_desc *desc; + + desc = &pf->ptp.ice_pin_desc[pin_desc_idx]; + event.timestamp -= desc->delay[0]; } + + event.type = PTP_CLOCK_EXTTS; + event.index = 
chan; + pf->ptp.ext_ts_irq &= ~(1 << chan); + ptp_clock_event(pf->ptp.clock, &event); } } @@ -1737,7 +1735,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, /* 0. Reset mode & out_en in AUX_OUT */ wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); - if (ice_is_e825c(hw)) { + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) { int err; /* Enable/disable CGU 1PPS output for E825C */ @@ -1799,9 +1797,9 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, int on) { + unsigned int gpio_pin, prop_delay_ns; u64 clk, period, start, phase; struct ice_hw *hw = &pf->hw; - unsigned int gpio_pin; int pin_desc_idx; if (rq->flags & ~PTP_PEROUT_PHASE) @@ -1812,6 +1810,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, return -EIO; gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1]; + prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1]; period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec; /* If we're disabling the output or period is 0, clear out CLKO and TGT @@ -1821,7 +1820,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0); if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 && - period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) { + period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) { dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n"); return -EOPNOTSUPP; } @@ -1844,11 +1843,11 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, * from now, so we have time to write it to HW. */ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500; - if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw)) + if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) start = div64_u64(clk + period - 1, period) * period + phase; /* Compensate for propagation delay from the generator to the pin. */ - start -= ice_prop_delay(hw); + start -= prop_delay_ns; return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); } @@ -2076,7 +2075,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) /* For Vernier mode on E82X, we need to recalibrate after new settime. * Start with marking timestamps as invalid. 
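+ * The offsets are recalculated and the timestamp blocks re-enabled + * below via ice_ptp_restart_all_phy() once the new time is written.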
*/ - if (ice_get_phy_model(hw) == ICE_PHY_E82X) { + if (hw->mac_type == ICE_MAC_GENERIC) { err = ice_ptp_clear_phy_offset_ready_e82x(hw); if (err) dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); @@ -2100,7 +2099,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_enable_all_perout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (ice_get_phy_model(hw) == ICE_PHY_E82X) + if (hw->mac_type == ICE_MAC_GENERIC) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2178,93 +2177,158 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) return 0; } +/** + * struct ice_crosststamp_cfg - Device cross timestamp configuration + * @lock_reg: The hardware semaphore lock to use + * @lock_busy: Bit in the semaphore lock indicating the lock is busy + * @ctl_reg: The hardware register to request cross timestamp + * @ctl_active: Bit in the control register to request cross timestamp + * @art_time_l: Lower 32-bits of ART system time + * @art_time_h: Upper 32-bits of ART system time + * @dev_time_l: Lower 32-bits of device time (per timer index) + * @dev_time_h: Upper 32-bits of device time (per timer index) + */ +struct ice_crosststamp_cfg { + /* HW semaphore lock register */ + u32 lock_reg; + u32 lock_busy; + + /* Capture control register */ + u32 ctl_reg; + u32 ctl_active; + + /* Time storage */ + u32 art_time_l; + u32 art_time_h; + u32 dev_time_l[2]; + u32 dev_time_h[2]; +}; + +static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = { + .lock_reg = PFHH_SEM, + .lock_busy = PFHH_SEM_BUSY_M, + .ctl_reg = GLHH_ART_CTL, + .ctl_active = GLHH_ART_CTL_ACTIVE_M, + .art_time_l = GLHH_ART_TIME_L, + .art_time_h = GLHH_ART_TIME_H, + .dev_time_l[0] = GLTSYN_HHTIME_L(0), + .dev_time_h[0] = GLTSYN_HHTIME_H(0), + .dev_time_l[1] = GLTSYN_HHTIME_L(1), + .dev_time_h[1] = GLTSYN_HHTIME_H(1), +}; + #ifdef CONFIG_ICE_HWTS +static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = { + .lock_reg = E830_PFPTM_SEM, + .lock_busy = E830_PFPTM_SEM_BUSY_M, + .ctl_reg = E830_GLPTM_ART_CTL, + .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M, + .art_time_l = E830_GLPTM_ART_TIME_L, + .art_time_h = E830_GLPTM_ART_TIME_H, + .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0), + .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0), + .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1), + .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1), +}; + +#endif /* CONFIG_ICE_HWTS */ +/** + * struct ice_crosststamp_ctx - Device cross timestamp context + * @snapshot: snapshot of system clocks for historic interpolation + * @pf: pointer to the PF private structure + * @cfg: pointer to hardware configuration for cross timestamp + */ +struct ice_crosststamp_ctx { + struct system_time_snapshot snapshot; + struct ice_pf *pf; + const struct ice_crosststamp_cfg *cfg; +}; + /** - * ice_ptp_get_syncdevicetime - Get the cross time stamp info + * ice_capture_crosststamp - Capture a device/system cross timestamp * @device: Current device time * @system: System counter value read synchronously with device time - * @ctx: Context provided by timekeeping code + * @__ctx: Context passed from ice_ptp_getcrosststamp * * Read device and system (ART) clock simultaneously and return the corrected * clock values in ns. + * + * Return: zero on success, or a negative error code on failure. 
*/ -static int -ice_ptp_get_syncdevicetime(ktime_t *device, - struct system_counterval_t *system, - void *ctx) +static int ice_capture_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *__ctx) { - struct ice_pf *pf = (struct ice_pf *)ctx; - struct ice_hw *hw = &pf->hw; - u32 hh_lock, hh_art_ctl; - int i; + struct ice_crosststamp_ctx *ctx = __ctx; + const struct ice_crosststamp_cfg *cfg; + u32 lock, ctl, ts_lo, ts_hi, tmr_idx; + struct ice_pf *pf; + struct ice_hw *hw; + int err; + u64 ts; -#define MAX_HH_HW_LOCK_TRIES 5 -#define MAX_HH_CTL_LOCK_TRIES 100 + cfg = ctx->cfg; + pf = ctx->pf; + hw = &pf->hw; - for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { - /* Get the HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); - if (hh_lock & PFHH_SEM_BUSY_M) { - usleep_range(10000, 15000); - continue; - } - break; - } - if (hh_lock & PFHH_SEM_BUSY_M) { - dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; + if (tmr_idx > 1) + return -EINVAL; + + /* Poll until we obtain the cross-timestamp hardware semaphore */ + err = rd32_poll_timeout(hw, cfg->lock_reg, lock, + !(lock & cfg->lock_busy), + 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC); + if (err) { + dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n"); return -EBUSY; } + /* Snapshot system time for historic interpolation */ + ktime_get_snapshot(&ctx->snapshot); + /* Program cmd to master timer */ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); /* Start the ART and device clock sync sequence */ - hh_art_ctl = rd32(hw, GLHH_ART_CTL); - hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; - wr32(hw, GLHH_ART_CTL, hh_art_ctl); - - for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { - /* Wait for sync to complete */ - hh_art_ctl = rd32(hw, GLHH_ART_CTL); - if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { - udelay(1); - continue; - } else { - u32 hh_ts_lo, hh_ts_hi, tmr_idx; - u64 hh_ts; - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - /* Read ART time */ - hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); - hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); - hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; - system->cycles = hh_ts; - system->cs_id = CSID_X86_ART; - /* Read Device source clock time */ - hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); - hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); - hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; - *device = ns_to_ktime(hh_ts); - break; - } - } + ctl = rd32(hw, cfg->ctl_reg); + ctl |= cfg->ctl_active; + wr32(hw, cfg->ctl_reg, ctl); + /* Poll until hardware completes the capture */ + err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active), + 5, 20 * USEC_PER_MSEC); + if (err) + goto err_timeout; + + /* Read ART system time */ + ts_lo = rd32(hw, cfg->art_time_l); + ts_hi = rd32(hw, cfg->art_time_h); + ts = ((u64)ts_hi << 32) | ts_lo; + system->cycles = ts; + system->cs_id = CSID_X86_ART; + system->use_nsecs = true; + + /* Read Device source clock time */ + ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]); + ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]); + ts = ((u64)ts_hi << 32) | ts_lo; + *device = ns_to_ktime(ts); + +err_timeout: /* Clear the master timer */ ice_ptp_src_cmd(hw, ICE_PTP_NOP); /* Release HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); - hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; - wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); - - if (i == MAX_HH_CTL_LOCK_TRIES) - return -ETIMEDOUT; + lock = rd32(hw, cfg->lock_reg); + lock &= ~cfg->lock_busy; + wr32(hw, cfg->lock_reg, lock); - 
return 0; + return err; } /** - * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp + * ice_ptp_getcrosststamp - Capture a device cross timestamp * @info: the driver's PTP info structure * @cts: The memory to fill the cross timestamp info * @@ -2272,22 +2336,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device, * clock. Fill the cross timestamp information and report it back to the * caller. * - * This is only valid for E822 and E823 devices which have support for - * generating the cross timestamp via PCIe PTM. - * * In order to correctly correlate the ART timestamp back to the TSC time, the * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. + * + * Return: zero on success, or a negative error code on failure. */ -static int -ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, - struct system_device_crosststamp *cts) +static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) { struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_crosststamp_ctx ctx = { + .pf = pf, + }; + + switch (pf->hw.mac_type) { + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + ctx.cfg = &ice_crosststamp_cfg_e82x; + break; +#ifdef CONFIG_ICE_HWTS + case ICE_MAC_E830: + ctx.cfg = &ice_crosststamp_cfg_e830; + break; +#endif /* CONFIG_ICE_HWTS */ + default: + return -EOPNOTSUPP; + } - return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, - pf, NULL, cts); + return get_device_system_crosststamp(ice_capture_crosststamp, &ctx, + &ctx.snapshot, cts); } -#endif /* CONFIG_ICE_HWTS */ /** * ice_ptp_get_ts_config - ioctl interface to read the timestamping config @@ -2548,13 +2626,9 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries, */ static void ice_ptp_set_funcs_e82x(struct ice_pf *pf) { -#ifdef CONFIG_ICE_HWTS - if (boot_cpu_has(X86_FEATURE_ART) && - boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) - pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x; + pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp; -#endif /* CONFIG_ICE_HWTS */ - if (ice_is_e825c(&pf->hw)) { + if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) { pf->ptp.ice_pin_desc = ice_pin_desc_e825c; pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c); } else { @@ -2620,6 +2694,28 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf) } } +/** + * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support + * @pf: Board private structure + * + * Assign functions to the PTP capabilities structure for E830 devices. + * Functions which operate across all device families should be set directly + * in ice_ptp_set_caps. Only add functions here which are distinct for E830 + * devices.
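+ * + * For example, the E830 cross timestamp relies on PCIe PTM, so the + * getcrosststamp callback is assigned below only when PTM is enabled on the + * device and the CPU exposes the ART counter.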
+ */ +static void ice_ptp_set_funcs_e830(struct ice_pf *pf) +{ +#ifdef CONFIG_ICE_HWTS + if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART)) + pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp; + +#endif /* CONFIG_ICE_HWTS */ + /* Rest of the config is the same as base E810 */ + pf->ptp.ice_pin_desc = ice_pin_desc_e810; + pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); + ice_ptp_setup_pin_cfg(pf); +} + /** * ice_ptp_set_caps - Set PTP capabilities * @pf: Board private structure @@ -2642,10 +2738,20 @@ static void ice_ptp_set_caps(struct ice_pf *pf) info->enable = ice_ptp_gpio_enable; info->verify = ice_verify_pin; - if (ice_is_e810(&pf->hw)) + switch (pf->hw.mac_type) { + case ICE_MAC_E810: ice_ptp_set_funcs_e810(pf); - else + return; + case ICE_MAC_E830: + ice_ptp_set_funcs_e830(pf); + return; + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: ice_ptp_set_funcs_e82x(pf); + return; + default: + return; + } } /** @@ -2755,6 +2861,68 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) } } +/** + * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context + * @pf: Board private structure + * + * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom + * half of the interrupt and IRQ_HANDLED otherwise. + */ +irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + + switch (hw->mac_type) { + case ICE_MAC_E810: + /* E810 capable of low latency timestamping with interrupt can + * request a single timestamp in the top half and wait for + * a second LL TS interrupt from the FW when it's ready. + */ + if (hw->dev_caps.ts_dev_info.ts_ll_int_read) { + struct ice_ptp_tx *tx = &pf->ptp.port.tx; + u8 idx, last; + + if (!ice_pf_state_is_nominal(pf)) + return IRQ_HANDLED; + + spin_lock(&tx->lock); + if (tx->init) { + last = tx->last_ll_ts_idx_read + 1; + idx = find_next_bit_wrap(tx->in_use, tx->len, + last); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + } + spin_unlock(&tx->lock); + + return IRQ_HANDLED; + } + fallthrough; /* non-LL_TS E810 */ + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + /* All other devices process timestamps in the bottom half due + * to sleeping or polling. + */ + if (!ice_ptp_pf_handles_tx_interrupt(pf)) + return IRQ_HANDLED; + + set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); + return IRQ_WAKE_THREAD; + case ICE_MAC_E830: + /* E830 can read timestamps in the top half using rd32() */ + if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { + /* Process outstanding Tx timestamps. If there + * is more work, re-arm the interrupt to trigger again. 
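+ * Writing the Tx timestamp cause bit back into PFINT_OICR re-fires + * the interrupt, so the remaining timestamps are processed on the + * next pass through this handler.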
+ */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } + return IRQ_HANDLED; + default: + return IRQ_HANDLED; + } +} + /** * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt * @pf: Board private structure @@ -2775,7 +2943,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) bool trigger_oicr = false; unsigned int i; - if (ice_is_e810(hw)) + if (!pf->ptp.port.tx.has_ready_bitmap) return; if (!ice_pf_src_tmr_owned(pf)) @@ -2914,14 +3082,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) */ ice_ptp_flush_all_tx_tracker(pf); - if (!ice_is_e810(hw)) { - /* Enable quad interrupts */ - err = ice_ptp_cfg_phy_interrupt(pf, true, 1); - if (err) - return err; + /* Enable quad interrupts */ + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); + if (err) + return err; - ice_ptp_restart_all_phy(pf); - } + ice_ptp_restart_all_phy(pf); /* Re-enable all periodic outputs and external timestamp events */ ice_ptp_enable_all_perout(pf); @@ -2969,8 +3135,9 @@ void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) static bool ice_is_primary(struct ice_hw *hw) { - return ice_is_e825c(hw) && ice_is_dual(hw) ? - !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true; + return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ? + !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : + true; } static int ice_ptp_setup_adapter(struct ice_pf *pf) @@ -2988,7 +3155,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf) struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); struct ice_ptp *ptp = &pf->ptp; - if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP) + if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN) return -ENODEV; INIT_LIST_HEAD(&ptp->port.list_node); @@ -3005,7 +3172,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf) { struct ice_ptp *ptp = &pf->ptp; - if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) { + if (pf->hw.mac_type != ICE_MAC_UNKNOWN) { mutex_lock(&pf->adapter->ports.lock); list_del(&ptp->port.list_node); mutex_unlock(&pf->adapter->ports.lock); @@ -3125,6 +3292,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) * ice_ptp_init_port - Initialize PTP port structure * @pf: Board private structure * @ptp_port: PTP port structure + * + * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc. */ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) { @@ -3132,16 +3301,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) mutex_init(&ptp_port->ps_lock); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, - ptp_port->port_num); - case ICE_PHY_E810: - return ice_ptp_init_tx_e810(pf, &ptp_port->tx); - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num); + case ICE_MAC_GENERIC: kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, ptp_port->port_num); default: @@ -3160,8 +3327,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) */ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { - switch (ice_get_phy_model(&pf->hw)) { - case ICE_PHY_E82X: + switch (pf->hw.mac_type) { + case ICE_MAC_GENERIC: /* E822 based PHY has the clock owner process the interrupt * for all ports.
*/ @@ -3192,10 +3359,17 @@ void ice_ptp_init(struct ice_pf *pf) { struct ice_ptp *ptp = &pf->ptp; struct ice_hw *hw = &pf->hw; - int err; + int lane_num, err; ptp->state = ICE_PTP_INITIALIZING; + lane_num = ice_get_phy_lane_number(hw); + if (lane_num < 0) { + err = lane_num; + goto err_exit; + } + + ptp->port.port_num = (u8)lane_num; ice_ptp_init_hw(hw); ice_ptp_init_tx_interrupt_mode(pf); @@ -3216,10 +3390,6 @@ void ice_ptp_init(struct ice_pf *pf) if (err) goto err_exit; - ptp->port.port_num = hw->pf_id; - if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) - ptp->port.port_num = hw->pf_id * 2; - err = ice_ptp_init_port(pf, &ptp->port); if (err) goto err_exit; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 824e73b677a43..783139de7f741 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -128,8 +128,7 @@ struct ice_ptp_tx { /* Quad and port information for initializing timestamp blocks */ #define INDEX_PER_QUAD 64 #define INDEX_PER_PORT_E82X 16 -#define INDEX_PER_PORT_E810 64 -#define INDEX_PER_PORT_ETH56G 64 +#define INDEX_PER_PORT 64 /** * struct ice_ptp_port - data used to initialize an external port for PTP @@ -211,6 +210,7 @@ enum ice_ptp_pin_nvm { * struct ice_ptp_pin_desc - hardware pin description data * @name_idx: index of the name of pin in ice_pin_names * @gpio: the associated GPIO input and output pins + * @delay: input and output signal delays in nanoseconds * * Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array * for the device. Device families have separate sets of available pins with @@ -219,6 +219,7 @@ enum ice_ptp_pin_nvm { struct ice_ptp_pin_desc { int name_idx; int gpio[2]; + unsigned int delay[2]; }; /** @@ -302,6 +303,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); +irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf); u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, const struct ice_pkt_ctx *pkt_ctx); @@ -310,7 +312,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type); void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); -void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); +void ice_ptp_link_change(struct ice_pf *pf, bool linkup); #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) { @@ -340,6 +342,11 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf) return true; } +static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) +{ + return IRQ_HANDLED; +} + static inline u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, const struct ice_pkt_ctx *pkt_ctx) @@ -358,7 +365,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf, } static inline void ice_ptp_init(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { } -static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup) { } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 585ce200c60f1..fe362e3faff1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ 
b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = { .rx_offset = { .serdes = 0xffffeb27, /* -10.42424 */ .no_fec = 0xffffcccd, /* -25.6 */ - .fc = 0xfffe0014, /* -255.96 */ + .fc = 0xfffc557b, /* -469.26 */ .sfd = 0x4a4, /* 2.32 */ .bs_ds = 0x32 /* 0.0969697 */ } @@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 823437500, /* 823.4375 MHz PLL */ /* nominal_incval */ 0x136e44fabULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ @@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ @@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 796875000, /* 796.875 MHz */ /* nominal_incval */ 0x141414141ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ @@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 816000000, /* 816 MHz */ /* nominal_incval */ 0x139b9b9baULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ @@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 830078125, /* 830.78125 MHz */ /* nominal_incval */ 0x134679aceULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ @@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, }; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index ce9ee05c0e777..90eb8ed6cee7e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -746,7 +746,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw) int err; /* Disable sticky lock detection so lock err reported is accurate */ - if (ice_is_e825c(hw)) + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw); else err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw); @@ -756,7 +756,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw) /* Configure the CGU PLL using the parameters from the function * capabilities. */ - if (ice_is_e825c(hw)) + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref, (enum ice_clk_src)ts_info->clk_src); else @@ -827,8 +827,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, /* Certain hardware families share the same register values for the * port register and source timer register. */ - switch (ice_get_phy_model(hw)) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810; default: break; @@ -895,36 +896,62 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) ice_flush(hw); } +/** + * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay + * @hw: pointer to HW struct + * @delay: delay between PHC and PHY SYNC command execution in nanoseconds + */ +static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay) +{ + wr32(hw, GLTSYN_SYNC_DLAY, delay); + ice_flush(hw); +} + /* 56G PHY device functions * * The following functions operate on devices with the ETH 56G PHY. 
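 * On E825C the eight logical ports are split across two four-port PHY * complexes reached through the sideband queue; the helpers below map a * flat port number onto the correct PHY and lane.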
*/ +/** + * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number + * @hw: pointer to the HW struct + * @port: destination port + * + * Return: destination sideband queue PHY device. + */ +static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw, + u8 port) +{ + /* On a single complex E825, PHY 0 is always destination device phy_0 + * and PHY 1 is phy_0_peer. + */ + if (port >= hw->ptp.ports_per_phy) + return eth56g_phy_1; + else + return eth56g_phy_0; +} + /** * ice_write_phy_eth56g - Write a PHY port register * @hw: pointer to the HW struct - * @phy_idx: PHY index + * @port: destination port * @addr: PHY register address * @val: Value to write * * Return: 0 on success, other error codes when failed to write to PHY */ -static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, - u32 val) +static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val) { - struct ice_sbq_msg_input phy_msg; + struct ice_sbq_msg_input msg = { + .dest_dev = ice_ptp_get_dest_dev_e825(hw, port), + .opcode = ice_sbq_msg_wr, + .msg_addr_low = lower_16_bits(addr), + .msg_addr_high = upper_16_bits(addr), + .data = val + }; int err; - phy_msg.opcode = ice_sbq_msg_wr; - - phy_msg.msg_addr_low = lower_16_bits(addr); - phy_msg.msg_addr_high = upper_16_bits(addr); - - phy_msg.data = val; - phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; - - err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); - + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", err); @@ -935,41 +962,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, /** * ice_read_phy_eth56g - Read a PHY port register * @hw: pointer to the HW struct - * @phy_idx: PHY index + * @port: destination port * @addr: PHY register address * @val: Value to read * * Return: 0 on success, other error codes when failed to read from PHY */ -static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, - u32 *val) +static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val) { - struct ice_sbq_msg_input phy_msg; + struct ice_sbq_msg_input msg = { + .dest_dev = ice_ptp_get_dest_dev_e825(hw, port), + .opcode = ice_sbq_msg_rd, + .msg_addr_low = lower_16_bits(addr), + .msg_addr_high = upper_16_bits(addr) + }; int err; - phy_msg.opcode = ice_sbq_msg_rd; - - phy_msg.msg_addr_low = lower_16_bits(addr); - phy_msg.msg_addr_high = upper_16_bits(addr); - - phy_msg.data = 0; - phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; - - err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); - if (err) { + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); + if (err) ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", err); - return err; - } - - *val = phy_msg.data; + else + *val = msg.data; - return 0; + return err; } /** * ice_phy_res_address_eth56g - Calculate a PHY port register address - * @port: Port number to be written + * @hw: pointer to the HW struct + * @lane: Lane number to be written * @res_type: resource type (register/memory) * @offset: Offset from PHY port register base * @addr: The result address * @@ -978,17 +1000,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, * * Return: * * %0 - success * * %EINVAL - invalid port number or resource type */ -static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type, - u32 offset, u32 *addr) +static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane, + enum eth56g_res_type res_type, + u32 offset, + u32
*addr) { - u8 lane = port % ICE_PORTS_PER_QUAD; - u8 phy = ICE_GET_QUAD_NUM(port); - if (res_type >= NUM_ETH56G_PHY_RES) return -EINVAL; - *addr = eth56g_phy_res[res_type].base[phy] + + /* Lanes 4..7 are in fact 0..3 on a second PHY */ + lane %= hw->ptp.ports_per_phy; + *addr = eth56g_phy_res[res_type].base[0] + lane * eth56g_phy_res[res_type].step + offset; + return 0; } @@ -1008,19 +1032,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type, static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, u32 val, enum eth56g_res_type res_type) { - u8 phy_port = port % hw->ptp.ports_per_phy; - u8 phy_idx = port / hw->ptp.ports_per_phy; u32 addr; int err; if (port >= hw->ptp.num_lports) return -EINVAL; - err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr); if (err) return err; - return ice_write_phy_eth56g(hw, phy_idx, addr, val); + return ice_write_phy_eth56g(hw, port, addr, val); } /** @@ -1039,19 +1061,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, u32 *val, enum eth56g_res_type res_type) { - u8 phy_port = port % hw->ptp.ports_per_phy; - u8 phy_idx = port / hw->ptp.ports_per_phy; u32 addr; int err; if (port >= hw->ptp.num_lports) return -EINVAL; - err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr); if (err) return err; - return ice_read_phy_eth56g(hw, phy_idx, addr, val); + return ice_read_phy_eth56g(hw, port, addr, val); } /** @@ -1200,6 +1220,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP); } +/** + * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * + * Return: + * * %0 - success + * * %EIO - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u32 offset, u32 val) +{ + u32 addr; + + if (port >= hw->ptp.num_lports) + return -EIO; + + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset; + + return ice_write_phy_eth56g(hw, port, addr, val); +} + +/** + * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to read + * + * Return: + * * %0 - success + * * %EIO - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u32 offset, u32 *val) +{ + u32 addr; + + if (port >= hw->ptp.num_lports) + return -EIO; + + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset; + + return ice_read_phy_eth56g(hw, port, addr, val); +} + /** * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register * @low_addr: the low address to check @@ -1518,8 +1588,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx, * lower 8 bits in the low register, and the upper 32 bits in the high * register. 
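 * * For example, a 40-bit timestamp of 0xAABBCCDDEE is read back as * hi = 0xAABBCCDD (bits 39:8) and a low register whose low byte is 0xEE * (bits 7:0); the PHY_40B_* masks below recombine the two into the full * value.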
*/ - *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); - + *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) | + FIELD_PREP(PHY_40B_LOW_M, lo); return 0; } @@ -1918,7 +1988,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li) */ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) { - u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); u32 val; int err; @@ -1933,8 +2002,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { case ICE_ETH56G_LNK_SPD_1G: case ICE_ETH56G_LNK_SPD_2_5G: - err = ice_read_ptp_reg_eth56g(hw, port_blk, - PHY_GPCS_CONFIG_REG0, &val); + err = ice_read_quad_ptp_reg_eth56g(hw, port, + PHY_GPCS_CONFIG_REG0, &val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d", err); @@ -1945,8 +2014,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M, ICE_ETH56G_NOMINAL_TX_THRESH); - err = ice_write_ptp_reg_eth56g(hw, port_blk, - PHY_GPCS_CONFIG_REG0, val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, + PHY_GPCS_CONFIG_REG0, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d", err); @@ -1987,50 +2056,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) */ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port) { - u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); - u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1); + u8 quad_lane = port % ICE_PORTS_PER_QUAD; + u32 addr, val, peer_delay; bool enable, sfd_ena; - u32 val, peer_delay; int err; enable = hw->ptp.phy.eth56g.onestep_ena; peer_delay = hw->ptp.phy.eth56g.peer_delay; sfd_ena = hw->ptp.phy.eth56g.sfd_ena; - /* PHY_PTP_1STEP_CONFIG */ - err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val); + addr = PHY_PTP_1STEP_CONFIG; + err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val); if (err) return err; if (enable) - val |= blk_port; + val |= BIT(quad_lane); else - val &= ~blk_port; + val &= ~BIT(quad_lane); val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M); - err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); if (err) return err; - /* PHY_PTP_1STEP_PEER_DELAY */ + addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane); val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay); if (peer_delay) val |= PHY_PTP_1STEP_PD_ADD_PD_M; val |= PHY_PTP_1STEP_PD_DLY_V_M; - err = ice_write_ptp_reg_eth56g(hw, port_blk, - PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); if (err) return err; val &= ~PHY_PTP_1STEP_PD_DLY_V_M; - err = ice_write_ptp_reg_eth56g(hw, port_blk, - PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); if (err) return err; - /* PHY_MAC_XIF_MODE */ - err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val); + addr = PHY_MAC_XIF_MODE; + err = ice_read_mac_reg_eth56g(hw, port, addr, &val); if (err) return err; @@ -2050,7 +2116,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port) FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) | FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena); - return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val); + return ice_write_mac_reg_eth56g(hw, port, addr, val); } /** @@ -2092,21 +2158,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs, bool fc, bool rs, enum ice_eth56g_link_spd spd) { - u8 
port_offset = port & (ICE_PORTS_PER_QUAD - 1); - u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); u32 bitslip; int err; if (!bs || rs) return 0; - if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) + if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) { err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP, &bitslip); - else - err = ice_read_ptp_reg_eth56g(hw, port_blk, - PHY_REG_SD_BIT_SLIP(port_offset), - &bitslip); + } else { + u8 quad_lane = port % ICE_PORTS_PER_QUAD; + u32 addr; + + addr = PHY_REG_SD_BIT_SLIP(quad_lane); + err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip); + } if (err) return 0; @@ -2666,59 +2733,23 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port, } /** - * ice_is_muxed_topo - detect breakout 2x50G topology for E825C + * ice_ptp_init_phy_e825 - initialize PHY parameters * @hw: pointer to the HW struct - * - * Return: true if it's 2x50 breakout topology, false otherwise */ -static bool ice_is_muxed_topo(struct ice_hw *hw) -{ - u8 link_topo; - bool mux; - u32 val; - - val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG); - mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val); - val = rd32(hw, GLGEN_MAC_LINK_TOPO); - link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val); - - return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS); -} - -/** - * ice_ptp_init_phy_e825c - initialize PHY parameters - * @hw: pointer to the HW struct - */ -static void ice_ptp_init_phy_e825c(struct ice_hw *hw) +static void ice_ptp_init_phy_e825(struct ice_hw *hw) { struct ice_ptp_hw *ptp = &hw->ptp; struct ice_eth56g_params *params; - u8 phy; - ptp->phy_model = ICE_PHY_ETH56G; params = &ptp->phy.eth56g; params->onestep_ena = false; params->peer_delay = 0; params->sfd_ena = false; - params->phy_addr[0] = eth56g_phy_0; - params->phy_addr[1] = eth56g_phy_1; params->num_phys = 2; ptp->ports_per_phy = 4; ptp->num_lports = params->num_phys * ptp->ports_per_phy; ice_sb_access_ena_eth56g(hw, true); - for (phy = 0; phy < params->num_phys; phy++) { - u32 phy_rev; - int err; - - err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev); - if (err || phy_rev != PHY_REVISION_ETH56G) { - ptp->phy_model = ICE_PHY_UNSUP; - return; - } - } - - ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw); } /* E822 family functions @@ -2737,10 +2768,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw, struct ice_sbq_msg_input *msg, u8 port, u16 offset) { - int phy_port, phy, quadtype; + int phy_port, quadtype; phy_port = port % hw->ptp.ports_per_phy; - phy = port / hw->ptp.ports_per_phy; quadtype = ICE_GET_QUAD_NUM(port) % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy); @@ -2752,12 +2782,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw, msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); } - if (phy == 0) - msg->dest_dev = rmn_0; - else if (phy == 1) - msg->dest_dev = rmn_1; - else - msg->dest_dev = rmn_2; + msg->dest_dev = rmn_0; } /** @@ -3199,7 +3224,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * lower 8 bits in the low register, and the upper 32 bits in the high * register. 
*/ - *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo); + *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) | + FIELD_PREP(PHY_40B_LOW_M, lo); return 0; } @@ -4772,7 +4798,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold) */ static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp) { - ptp->phy_model = ICE_PHY_E82X; ptp->num_lports = 8; ptp->ports_per_phy = 8; } @@ -4966,7 +4991,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) /* For E810 devices, the timestamp is reported with the lower 32 bits * in the low register, and the upper 8 bits in the high register. */ - *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M); + *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) | + FIELD_PREP(PHY_EXT_40B_LOW_M, lo); return 0; } @@ -5029,8 +5055,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw) u8 tmr_idx; int err; - /* Ensure synchronization delay is zero */ - wr32(hw, GLTSYN_SYNC_DLAY, 0); + ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), @@ -5295,68 +5320,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) * to access the extended GPIOs available. */ -/** - * ice_get_pca9575_handle - * @hw: pointer to the hw struct - * @pca9575_handle: GPIO controller's handle - * - * Find and return the GPIO controller's handle in the netlist. - * When found - the value will be cached in the hw structure and following calls - * will return cached value - */ -static int -ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) -{ - struct ice_aqc_get_link_topo *cmd; - struct ice_aq_desc desc; - int status; - u8 idx; - - /* If handle was read previously return cached value */ - if (hw->io_expander_handle) { - *pca9575_handle = hw->io_expander_handle; - return 0; - } - - /* If handle was not detected read it from the netlist */ - cmd = &desc.params.get_link_topo; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); - - /* Set node type to GPIO controller */ - cmd->addr.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_TYPE_M & - ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); - -#define SW_PCA9575_SFP_TOPO_IDX 2 -#define SW_PCA9575_QSFP_TOPO_IDX 1 - - /* Check if the SW IO expander controlling SMA exists in the netlist. 
*/ - if (hw->device_id == ICE_DEV_ID_E810C_SFP) - idx = SW_PCA9575_SFP_TOPO_IDX; - else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) - idx = SW_PCA9575_QSFP_TOPO_IDX; - else - return -EOPNOTSUPP; - - cmd->addr.topo_params.index = idx; - - status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); - if (status) - return -EOPNOTSUPP; - - /* Verify if we found the right IO expander type */ - if (desc.params.get_link_topo.node_part_num != - ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) - return -EOPNOTSUPP; - - /* If present save the handle and return it */ - hw->io_expander_handle = - le16_to_cpu(desc.params.get_link_topo.addr.handle); - *pca9575_handle = hw->io_expander_handle; - - return 0; -} - /** * ice_read_sma_ctrl * @hw: pointer to the hw struct @@ -5421,37 +5384,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data) return status; } -/** - * ice_read_pca9575_reg - * @hw: pointer to the hw struct - * @offset: GPIO controller register offset - * @data: pointer to data to be read from the GPIO controller - * - * Read the register from the GPIO controller - */ -int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data) -{ - struct ice_aqc_link_topo_addr link_topo; - __le16 addr; - u16 handle; - int err; - - memset(&link_topo, 0, sizeof(link_topo)); - - err = ice_get_pca9575_handle(hw, &handle); - if (err) - return err; - - link_topo.handle = cpu_to_le16(handle); - link_topo.topo_params.node_type_ctx = - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, - ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); - - addr = cpu_to_le16((u16)offset); - - return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); -} - /** * ice_ptp_read_sdp_ac - read SDP available connections section from NVM * @hw: pointer to the HW struct @@ -5518,18 +5450,138 @@ int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries) */ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) { - ptp->phy_model = ICE_PHY_E810; ptp->num_lports = 8; ptp->ports_per_phy = 4; init_waitqueue_head(&ptp->phy.e810.atqbal_wq); } +/* E830 functions + * + * The following functions operate on the E830 series devices. + * + */ + +/** + * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform E830-specific PTP hardware clock initialization steps. + */ +static void ice_ptp_init_phc_e830(const struct ice_hw *hw) +{ + ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); +} + +/** + * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change + * @hw: pointer to HW struct + * @incval: The new 40bit increment value to prepare + * + * Prepare the PHY port for a new increment value by programming the PHC + * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is + * completed by FW automatically. + */ +static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw, + u64 incval) +{ + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + + wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval)); + wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval)); +} + +/** + * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time + * @hw: Board private structure + * @time: Time to initialize the PHY port clock to + * + * Program the PHC GLTSYN_TIME registers directly with the initial clock + * time. On E830 the new time is applied by FW automatically, so the driver + * does not need to issue an ICE_PTP_INIT_TIME command. + * + * The time value is usually in units of nominal nanoseconds.
+ */ +static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw, + u64 time) +{ + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + + wr32(hw, GLTSYN_TIME_0(tmr_idx), 0); + wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time)); + wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time)); +} + +/** + * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command + * @hw: pointer to HW struct + * @cmd: Command to be sent to the port + * + * Prepare the external PHYs connected to this device for a timer sync + * command. + * + * Return: 0 on success, negative error code when PHY write failed + */ +static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); + + return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val); +} + +/** + * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY + * @hw: pointer to the HW struct + * @idx: the timestamp index to read + * @tstamp: on return, the 40bit timestamp value + * + * Read a 40bit timestamp value out of the timestamp block of the external PHY + * on the E830 device. + */ +static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx, + u64 *tstamp) +{ + u32 hi, lo; + + hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx)); + lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx)); + + /* For E830 devices, the timestamp is reported with the lower 32 bits + * in the low register, and the upper 8 bits in the high register. + */ + *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) | + FIELD_PREP(PHY_EXT_40B_LOW_M, lo); +} + +/** + * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @tstamp_ready: contents of the Tx memory status register + */ +static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port, + u64 *tstamp_ready) +{ + *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H); + *tstamp_ready <<= 32; + *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L); +} + +/** + * ice_ptp_init_phy_e830 - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp) +{ + ptp->num_lports = 8; + ptp->ports_per_phy = 4; +} + /* Device agnostic functions * - * The following functions implement shared behavior common to both E822 and - * E810 devices, possibly calling a device specific implementation where - * necessary. + * The following functions implement shared behavior common to all devices, + * possibly calling a device specific implementation where necessary. 
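+ * + * Dispatch is keyed off hw->mac_type, which replaces the old per-device + * PHY model enumeration.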
*/ /** @@ -5592,14 +5644,22 @@ void ice_ptp_init_hw(struct ice_hw *hw) { struct ice_ptp_hw *ptp = &hw->ptp; - if (ice_is_e822(hw) || ice_is_e823(hw)) - ice_ptp_init_phy_e82x(ptp); - else if (ice_is_e810(hw)) + switch (hw->mac_type) { + case ICE_MAC_E810: ice_ptp_init_phy_e810(ptp); - else if (ice_is_e825c(hw)) - ice_ptp_init_phy_e825c(hw); - else - ptp->phy_model = ICE_PHY_UNSUP; + break; + case ICE_MAC_E830: + ice_ptp_init_phy_e830(ptp); + break; + case ICE_MAC_GENERIC: + ice_ptp_init_phy_e82x(ptp); + break; + case ICE_MAC_GENERIC_3K_E825: + ice_ptp_init_phy_e825(hw); + break; + default: + return; + } } /** @@ -5620,11 +5680,11 @@ void ice_ptp_init_hw(struct ice_hw *hw) static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_GENERIC: return ice_ptp_write_port_cmd_e82x(hw, port, cmd); + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); default: return -EOPNOTSUPP; } @@ -5685,9 +5745,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) u32 port; /* PHY models which can program all ports simultaneously */ - switch (ice_get_phy_model(hw)) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_ptp_port_cmd_e810(hw, cmd); + case ICE_MAC_E830: + return ice_ptp_port_cmd_e830(hw, cmd); default: break; } @@ -5758,23 +5820,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Source timers */ + /* For E830 we don't need to use shadow registers, it's automatic */ + if (hw->mac_type == ICE_MAC_E830) { + ice_ptp_write_direct_phc_time_e830(hw, time); + return 0; + } + wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time)); wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time)); wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0); /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_ptp_prep_phy_time_eth56g(hw, - (u32)(time & 0xFFFFFFFF)); - break; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_time_eth56g(hw, + (u32)(time & 0xFFFFFFFF)); + break; default: err = -EOPNOTSUPP; } @@ -5806,20 +5874,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + /* For E830 we don't need to use shadow registers, it's automatic */ + if (hw->mac_type == ICE_MAC_E830) { + ice_ptp_write_direct_incval_e830(hw, incval); + return 0; + } + /* Shadow Adjust */ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_ptp_prep_phy_incval_eth56g(hw, incval); - break; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_incval_e82x(hw, incval); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_incval_eth56g(hw, incval); + break; default: err = -EOPNOTSUPP; } @@ -5879,16 +5953,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_ptp_prep_phy_adj_eth56g(hw, adj); - break; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; - case ICE_PHY_E82X: + case ICE_MAC_E830: + /* E830 sync PHYs automatically after setting GLTSYN_SHADJ */ + return 0; + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_adj_e82x(hw, adj); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_adj_eth56g(hw, adj); + break; default: err = -EOPNOTSUPP; } @@ -5912,13 +5989,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_read_phy_tstamp_e830(hw, idx, tstamp); + return 0; + case ICE_MAC_GENERIC: return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp); + case ICE_MAC_GENERIC_3K_E825: + return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); default: return -EOPNOTSUPP; } @@ -5942,13 +6022,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_clear_ptp_tstamp_eth56g(hw, block, idx); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: return ice_clear_phy_tstamp_e82x(hw, block, idx); + case ICE_MAC_GENERIC_3K_E825: + return ice_clear_ptp_tstamp_eth56g(hw, block, idx); default: return -EOPNOTSUPP; } @@ -6005,14 +6085,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) */ void ice_ptp_reset_ts_memory(struct ice_hw *hw) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - ice_ptp_reset_ts_memory_eth56g(hw); - break; - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_GENERIC: ice_ptp_reset_ts_memory_e82x(hw); break; - case ICE_PHY_E810: + case ICE_MAC_GENERIC_3K_E825: + ice_ptp_reset_ts_memory_eth56g(hw); + break; + case ICE_MAC_E810: default: return; } @@ -6034,13 +6114,16 @@ int ice_ptp_init_phc(struct ice_hw *hw) /* Clear event err indications for auxiliary pins */ (void)rd32(hw, GLTSYN_STAT(src_idx)); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_ptp_init_phc_eth56g(hw); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_ptp_init_phc_e810(hw); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_ptp_init_phc_e830(hw); + return 0; + case ICE_MAC_GENERIC: return ice_ptp_init_phc_e82x(hw); + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_init_phc_eth56g(hw); default: return -EOPNOTSUPP; } @@ -6059,17 +6142,19 @@ int ice_ptp_init_phc(struct ice_hw *hw) */ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, - tstamp_ready); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready); + return 0; + case 
ICE_MAC_GENERIC: return ice_get_phy_tx_tstamp_ready_e82x(hw, block, tstamp_ready); - break; + case ICE_MAC_GENERIC_3K_E825: + return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, + tstamp_ready); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 774244f2d8546..6dd028c2d7588 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g { * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L - * @pps_delay: propagation delay of the PPS output signal * * Characteristic information for the various TIME_REF sources possible in the * E822 devices @@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g { struct ice_time_ref_info_e82x { u64 pll_freq; u64 nominal_incval; - u8 pps_delay; }; /** @@ -326,8 +324,7 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; */ #define ICE_E810_PLL_FREQ 812500000 #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL -#define ICE_E810_OUT_PROP_DELAY_NS 1 -#define ICE_E825C_OUT_PROP_DELAY_NS 11 +#define ICE_E810_E830_SYNC_DELAY 0 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); @@ -389,11 +386,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) return e82x_time_ref[time_ref].nominal_incval; } -static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) -{ - return e82x_time_ref[time_ref].pps_delay; -} - /* E822 Vernier calibration functions */ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); @@ -404,7 +396,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold); /* E810 family functions */ int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl(struct ice_hw *hw, u8 data); -int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data); int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries); int ice_cgu_get_num_pins(struct ice_hw *hw, bool input); enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input); @@ -434,20 +425,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); #define ICE_ETH56G_NOMINAL_THRESH4 0x7777 #define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 -static inline u64 ice_prop_delay(const struct ice_hw *hw) -{ - switch (hw->ptp.phy_model) { - case ICE_PHY_ETH56G: - return ICE_E825C_OUT_PROP_DELAY_NS; - case ICE_PHY_E810: - return ICE_E810_OUT_PROP_DELAY_NS; - case ICE_PHY_E82X: - return ice_e82x_pps_delay(ice_e82x_time_ref(hw)); - default: - return 0; - } -} - /** * ice_get_base_incval - Get base clock increment value * @hw: pointer to the HW struct @@ -456,13 +433,14 @@ static inline u64 ice_prop_delay(const struct ice_hw *hw) */ static inline u64 ice_get_base_incval(struct ice_hw *hw) { - switch (hw->ptp.phy_model) { - case ICE_PHY_ETH56G: - return ICE_ETH56G_NOMINAL_INCVAL; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: return ICE_PTP_NOMINAL_INCVAL_E810; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: return ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); + case ICE_MAC_GENERIC_3K_E825: + return ICE_ETH56G_NOMINAL_INCVAL; default: return 0; } @@ -675,19 +653,25 @@ static inline bool ice_is_dual(struct ice_hw *hw) /* E810 timer command register */ #define 
E810_ETH_GLTSYN_CMD 0x03000344 +/* E830 timer command register */ +#define E830_ETH_GLTSYN_CMD 0x00088814 + +/* E830 PHC time register */ +#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx)) + /* Source timer incval macros */ #define INCVAL_HIGH_M 0xFF -/* Timestamp block macros */ +/* PHY 40b register macros */ +#define PHY_EXT_40B_LOW_M GENMASK(31, 0) +#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32) +#define PHY_40B_LOW_M GENMASK(7, 0) +#define PHY_40B_HIGH_M GENMASK_ULL(39, 8) #define TS_VALID BIT(0) #define TS_LOW_M 0xFFFFFFFF #define TS_HIGH_M 0xFF #define TS_HIGH_S 32 -#define TS_PHY_LOW_M 0xFF -#define TS_PHY_HIGH_M 0xFFFFFFFF -#define TS_PHY_HIGH_S 8 - #define BYTES_PER_IDX_ADDR_L_U 8 #define BYTES_PER_IDX_ADDR_L 4 diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 6e59ce991fc73..fda5c79df7541 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -861,7 +861,6 @@ struct ice_e810_params { struct ice_eth56g_params { u8 num_phys; - u8 phy_addr[2]; bool onestep_ena; bool sfd_ena; u32 peer_delay; @@ -872,14 +871,6 @@ union ice_phy_params { struct ice_eth56g_params eth56g; }; -/* PHY model */ -enum ice_phy_model { - ICE_PHY_UNSUP = -1, - ICE_PHY_E810 = 1, - ICE_PHY_E82X, - ICE_PHY_ETH56G, -}; - /* Global Link Topology */ enum ice_global_link_topo { ICE_LINK_TOPO_UP_TO_2_LINKS, @@ -889,11 +880,9 @@ enum ice_global_link_topo { }; struct ice_ptp_hw { - enum ice_phy_model phy_model; union ice_phy_params phy; u8 num_lports; u8 ports_per_phy; - bool is_2x50g_muxed_topo; }; /* Port hardware description */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index eac0f966e0e4c..323db1e2be388 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -319,6 +319,7 @@ struct igc_adapter { struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ ktime_t ptp_reset_start; /* Reset time in clock mono */ struct system_time_snapshot snapshot; + struct mutex ptm_lock; /* Only allow one PTM transaction at a time */ char fw_version[32]; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 8e449904aa7db..d19325b0e6e0b 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -574,7 +574,10 @@ #define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) #define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) -#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */ +/* A short cycle time of 1us theoretically should work, but appears to be too + * short in practice.
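+ * The 4us default below gives the PTM dialog enough margin to complete + * reliably.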
+ */ +#define IGC_PTM_SHORT_CYC_DEFAULT 4 /* Default short cycle interval */ #define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ #define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ @@ -593,6 +596,7 @@ #define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ #define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ #define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ +#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */ /* PCIe PTM Cycle Control */ #define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 6e70bca15db1d..cc89b72c85af7 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -7169,6 +7169,7 @@ static int igc_probe(struct pci_dev *pdev, err_register: igc_release_hw_control(adapter); + igc_ptp_stop(adapter); err_eeprom: if (!igc_check_reset_block(hw)) igc_reset_phy(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 946edbad43022..efc7b30e42113 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -974,45 +974,62 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) } } +/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_trigger() */ +static void igc_ptm_trigger(struct igc_hw *hw) +{ + u32 ctrl; + + /* To "manually" start the PTM cycle we need to set the + * trigger (TRIG) bit + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Perform flush after write to CTRL register otherwise + * transaction may not start + */ + wrfl(); +} + +/* The PTM lock: adapter->ptm_lock must be held when calling igc_ptm_reset() */ +static void igc_ptm_reset(struct igc_hw *hw) +{ + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Write to clear all status */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL); +} + static int igc_phc_get_syncdevicetime(ktime_t *device, struct system_counterval_t *system, void *ctx) { - u32 stat, t2_curr_h, t2_curr_l, ctrl; struct igc_adapter *adapter = ctx; struct igc_hw *hw = &adapter->hw; + u32 stat, t2_curr_h, t2_curr_l; int err, count = 100; ktime_t t1, t2_curr; - /* Get a snapshot of system clocks to use as historic value. */ - ktime_get_snapshot(&adapter->snapshot); - + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ do { - /* Doing this in a loop because in the event of a - * badly timed (ha!) system clock adjustment, we may - * get PTM errors from the PCI root, but these errors - * are transitory. Repeating the process returns valid - * data eventually. - */ + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); - /* To "manually" start the PTM cycle we need to clear and - * then set again the TRIG bit. - */ - ctrl = rd32(IGC_PTM_CTRL); - ctrl &= ~IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - ctrl |= IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - - /* The cycle only starts "for real" when software notifies - * that it has read the registers, this is done by setting - * VALID bit. 
- */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, stat, IGC_PTM_STAT_SLEEP, IGC_PTM_STAT_TIMEOUT); + igc_ptm_reset(hw); + if (err < 0) { netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); return err; @@ -1021,15 +1038,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device, if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) break; - if (stat & ~IGC_PTM_STAT_VALID) { - /* An error occurred, log it. */ - igc_ptm_log_error(adapter, stat); - /* The STAT register is write-1-to-clear (W1C), - * so write the previous error status to clear it. - */ - wr32(IGC_PTM_STAT, stat); - continue; - } + igc_ptm_log_error(adapter, stat); } while (--count); if (!count) { @@ -1061,9 +1070,16 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, { struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, ptp_caps); + int ret; + + /* This blocks until any in progress PTM transactions complete */ + mutex_lock(&adapter->ptm_lock); + + ret = get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); + mutex_unlock(&adapter->ptm_lock); - return get_device_system_crosststamp(igc_phc_get_syncdevicetime, - adapter, &adapter->snapshot, cts); + return ret; } static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp, @@ -1162,6 +1178,7 @@ void igc_ptp_init(struct igc_adapter *adapter) spin_lock_init(&adapter->ptp_tx_lock); spin_lock_init(&adapter->free_timer_lock); spin_lock_init(&adapter->tmreg_lock); + mutex_init(&adapter->ptm_lock); adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; @@ -1174,6 +1191,7 @@ void igc_ptp_init(struct igc_adapter *adapter) if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; netdev_err(netdev, "ptp_clock_register failed\n"); + mutex_destroy(&adapter->ptm_lock); } else if (adapter->ptp_clock) { netdev_info(netdev, "PHC added\n"); adapter->ptp_flags |= IGC_PTP_ENABLED; @@ -1203,10 +1221,12 @@ static void igc_ptm_stop(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u32 ctrl; + mutex_lock(&adapter->ptm_lock); ctrl = rd32(IGC_PTM_CTRL); ctrl &= ~IGC_PTM_CTRL_EN; wr32(IGC_PTM_CTRL, ctrl); + mutex_unlock(&adapter->ptm_lock); } /** @@ -1237,13 +1257,18 @@ void igc_ptp_suspend(struct igc_adapter *adapter) **/ void igc_ptp_stop(struct igc_adapter *adapter) { + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + igc_ptp_suspend(adapter); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); netdev_info(adapter->netdev, "PHC removed\n"); adapter->ptp_flags &= ~IGC_PTP_ENABLED; } + mutex_destroy(&adapter->ptm_lock); } /** @@ -1255,13 +1280,18 @@ void igc_ptp_stop(struct igc_adapter *adapter) void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; - u32 cycle_ctrl, ctrl; + u32 cycle_ctrl, ctrl, stat; unsigned long flags; u32 timadj; + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + /* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + mutex_lock(&adapter->ptm_lock); + spin_lock_irqsave(&adapter->tmreg_lock, flags); switch (adapter->hw.mac.type) { @@ -1290,14 +1320,19 @@ void igc_ptp_reset(struct igc_adapter *adapter) ctrl = IGC_PTM_CTRL_EN | IGC_PTM_CTRL_START_NOW | IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | - IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | - IGC_PTM_CTRL_TRIG; + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT); 
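+ /* Note that IGC_PTM_CTRL_TRIG is no longer part of the initial + * control value: each PTM cycle is now started explicitly through + * igc_ptm_trigger() and wound down through igc_ptm_reset(), both + * called with adapter->ptm_lock held. + */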
wr32(IGC_PTM_CTRL, ctrl); /* Force the first cycle to run. */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); + if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT)) + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + + igc_ptm_reset(hw); break; default: /* No work to do. */ @@ -1314,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter) out: spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptm_lock); + wrfl(); } diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 062e07c25ac36..301d979d0d08c 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2201,6 +2201,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x\n", + rx_cmd_a); + return 0; + } + if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index d53ee34d398f6..5235306de02be 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -76,6 +76,13 @@ unsigned long sclp_console_full; /* The currently active SCLP command word. */ static sclp_cmdw_t active_cmd; +static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int) +{ + if (sccb_int) + return __va(sccb_int); + return NULL; +} + static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err) { struct sclp_trace_entry e; @@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb) static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd) { - struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int); + struct sccb_header *sccb = sclpint_to_sccb(sccb_int); struct evbuf_header *evbuf; u16 response; @@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code, /* INT: Interrupt received (a=intparm, b=cmd) */ sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd, - (struct sccb_header *)__va(finished_sccb), + sclpint_to_sccb(finished_sccb), !ok_response(finished_sccb, active_cmd)); if (finished_sccb) { diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile index 6214a6b2e96d2..c025e875009e1 100644 --- a/drivers/scsi/fnic/Makefile +++ b/drivers/scsi/fnic/Makefile @@ -2,11 +2,13 @@ obj-$(CONFIG_FCOE_FNIC) += fnic.o fnic-y := \ + fip.o\ fnic_attrs.o \ fnic_isr.o \ fnic_main.o \ fnic_res.o \ fnic_fcs.o \ + fdls_disc.o \ fnic_scsi.o \ fnic_trace.o \ fnic_debugfs.o \ @@ -15,4 +17,5 @@ fnic-y := \ vnic_intr.o \ vnic_rq.o \ vnic_wq_copy.o \ - vnic_wq.o + vnic_wq.o \ + fnic_pci_subsys_devid.o diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c new file mode 100644 index 0000000000000..8d5848ee9c510 --- /dev/null +++ b/drivers/scsi/fnic/fdls_disc.c @@ -0,0 +1,5091 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ + +#include <linux/workqueue.h> +#include "fnic.h" +#include "fdls_fc.h" +#include "fnic_fdls.h" +#include <scsi/fc/fc_fcp.h> +#include <scsi/scsi_transport_fc.h> +#include <linux/utsname.h> + +#define FC_FC4_TYPE_SCSI 0x08 +#define PORT_SPEED_BIT_8 8 +#define PORT_SPEED_BIT_9 9 +#define PORT_SPEED_BIT_14 14 +#define PORT_SPEED_BIT_15 15 + +/* FNIC FDMI Register HBA Macros */ +#define FNIC_FDMI_NUM_PORTS 1 +#define FNIC_FDMI_NUM_HBA_ATTRS 9 +#define FNIC_FDMI_TYPE_NODE_NAME 0X1 +#define FNIC_FDMI_TYPE_MANUFACTURER 0X2 +#define FNIC_FDMI_MANUFACTURER "Cisco Systems" +#define FNIC_FDMI_TYPE_SERIAL_NUMBER 0X3 +#define FNIC_FDMI_TYPE_MODEL 0X4 +#define FNIC_FDMI_TYPE_MODEL_DES 0X5 +#define FNIC_FDMI_MODEL_DESCRIPTION "Cisco Virtual Interface Card" +#define FNIC_FDMI_TYPE_HARDWARE_VERSION 0X6 +#define FNIC_FDMI_TYPE_DRIVER_VERSION 0X7 +#define FNIC_FDMI_TYPE_ROM_VERSION 0X8 +#define FNIC_FDMI_TYPE_FIRMWARE_VERSION 0X9 +#define FNIC_FDMI_NN_LEN 8 +#define FNIC_FDMI_MANU_LEN 20 +#define FNIC_FDMI_SERIAL_LEN 16 +#define FNIC_FDMI_MODEL_LEN 12 +#define FNIC_FDMI_MODEL_DES_LEN 56 +#define FNIC_FDMI_HW_VER_LEN 16 +#define FNIC_FDMI_DR_VER_LEN 28 +#define FNIC_FDMI_ROM_VER_LEN 8 +#define FNIC_FDMI_FW_VER_LEN 16 + +/* FNIC FDMI Register PA Macros */ +#define FNIC_FDMI_TYPE_FC4_TYPES 0X1 +#define FNIC_FDMI_TYPE_SUPPORTED_SPEEDS 0X2 +#define FNIC_FDMI_TYPE_CURRENT_SPEED 0X3 +#define FNIC_FDMI_TYPE_MAX_FRAME_SIZE 0X4 +#define FNIC_FDMI_TYPE_OS_NAME 0X5 +#define FNIC_FDMI_TYPE_HOST_NAME 0X6 +#define FNIC_FDMI_NUM_PORT_ATTRS 6 +#define FNIC_FDMI_FC4_LEN 32 +#define FNIC_FDMI_SUPP_SPEED_LEN 4 +#define FNIC_FDMI_CUR_SPEED_LEN 4 +#define FNIC_FDMI_MFS_LEN 4 +#define FNIC_FDMI_MFS 0x800 +#define FNIC_FDMI_OS_NAME_LEN 16 +#define FNIC_FDMI_HN_LEN 24 + +#define FDLS_FDMI_PLOGI_PENDING 0x1 +#define FDLS_FDMI_REG_HBA_PENDING 0x2 +#define FDLS_FDMI_RPA_PENDING 0x4 +#define FDLS_FDMI_ABORT_PENDING 0x8 +#define FDLS_FDMI_MAX_RETRY 3 + +#define RETRIES_EXHAUSTED(iport) \ + (iport->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY) + +#define FNIC_TPORT_MAX_NEXUS_RESTART (8) + +#define SCHEDULE_OXID_FREE_RETRY_TIME (300) + +/* Private Functions */ +static void fdls_fdmi_register_hba(struct fnic_iport_s *iport); +static void fdls_fdmi_register_pa(struct fnic_iport_s *iport); +static void fdls_send_rpn_id(struct fnic_iport_s *iport); +static void fdls_process_flogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, + void *rx_frame); +static void fnic_fdls_start_plogi(struct fnic_iport_s *iport); +static void fnic_fdls_start_flogi(struct fnic_iport_s *iport); +static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport, + uint32_t fcid, + uint64_t wwpn); +static void fdls_target_restart_nexus(struct fnic_tport_s *tport); +static void fdls_start_tport_timer(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, int timeout); +static void fdls_tport_timer_callback(struct timer_list *t); +static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport); +static void fdls_start_fabric_timer(struct fnic_iport_s *iport, + int timeout); +static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_els_acc_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_els_rjt_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_fabric_abts_frame(uint8_t *frame, + struct fnic_iport_s *iport); + +uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + uint8_t *frame = NULL; + + frame = 
mempool_alloc(fnic->frame_pool, GFP_ATOMIC); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame"); + return NULL; + } + + memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ); + return frame; +} + +/** + * fdls_alloc_oxid - Allocate an oxid from the bitmap-based oxid pool + * @iport: Handle to iport instance + * @oxid_frame_type: Type of frame to allocate + * @active_oxid: the oxid which is in use + * + * Called with fnic lock held + */ +uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type, + uint16_t *active_oxid) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + int idx; + uint16_t oxid; + + lockdep_assert_held(&fnic->fnic_lock); + + /* + * Allocate next available oxid from bitmap + */ + idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ, oxid_pool->next_idx); + if (idx == FNIC_OXID_POOL_SZ) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Alloc oxid: all oxid slots are busy iport state:%d\n", + iport->state); + return FNIC_UNASSIGNED_OXID; + } + + WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap)); + oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */ + + oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type); + *active_oxid = oxid; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "alloc oxid: 0x%x, iport state: %d\n", + oxid, iport->state); + return oxid; +} + +/** + * fdls_free_oxid_idx - Free the oxid using the idx + * @iport: Handle to iport instance + * @oxid_idx: The index to free + * + * Free the oxid immediately and make it available for new requests. + * Called with fnic lock held + */ +static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + + lockdep_assert_held(&fnic->fnic_lock); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "free oxid idx: 0x%x\n", oxid_idx); + + WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap)); +} + +/** + * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work + * @work: Handle to work_struct + * + * Scheduled when an oxid is to be freed later. + * After freeing expired oxid(s), the handler schedules + * another callback with the remaining time + * of the next unexpired entry in the reclaim list. 
+ */ +void fdls_reclaim_oxid_handler(struct work_struct *work) +{ + struct fnic_oxid_pool_s *oxid_pool = container_of(work, + struct fnic_oxid_pool_s, oxid_reclaim_work.work); + struct fnic_iport_s *iport = container_of(oxid_pool, + struct fnic_iport_s, oxid_pool); + struct fnic *fnic = iport->fnic; + struct reclaim_entry_s *reclaim_entry, *next; + unsigned long delay_j, cur_jiffies; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reclaim oxid callback\n"); + + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + /* Though the work was scheduled for one entry, + * walk through and free the expired entries, which might have been + * scheduled at around the same time as the first entry. + */ + list_for_each_entry_safe(reclaim_entry, next, + &(oxid_pool->oxid_reclaim_list), links) { + + /* The list is always maintained in the order of expiry time */ + cur_jiffies = jiffies; + if (time_before(cur_jiffies, reclaim_entry->expires)) + break; + + list_del(&reclaim_entry->links); + fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx); + kfree(reclaim_entry); + } + + /* schedule to free up the next entry */ + if (!list_empty(&oxid_pool->oxid_reclaim_list)) { + reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list, + struct reclaim_entry_s, links); + + delay_j = reclaim_entry->expires - cur_jiffies; + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Scheduling next callback at:%ld jiffies\n", delay_j); + } + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); +} + +/** + * fdls_free_oxid - Helper function to free the oxid + * @iport: Handle to iport instance + * @oxid: oxid to free + * @active_oxid: the oxid which is in use + * + * Called with fnic lock held + */ +void fdls_free_oxid(struct fnic_iport_s *iport, + uint16_t oxid, uint16_t *active_oxid) +{ + fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid)); + *active_oxid = FNIC_UNASSIGNED_OXID; +} + +/** + * fdls_schedule_oxid_free - Schedule oxid to be freed later + * @iport: Handle to iport instance + * @active_oxid: the oxid which is in use + * + * Gets called in a rare scenario when both a command + * (fdls or target discovery) timed out and the following ABTS + * timed out as well, without a link change. + * + * Called with fnic lock held + */ +void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + struct reclaim_entry_s *reclaim_entry; + unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); + int oxid_idx = FNIC_OXID_IDX(*active_oxid); + + lockdep_assert_held(&fnic->fnic_lock); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Schedule oxid free. 
oxid: 0x%x\n", *active_oxid); + + *active_oxid = FNIC_UNASSIGNED_OXID; + + reclaim_entry = (struct reclaim_entry_s *) + kzalloc(sizeof(struct reclaim_entry_s), GFP_ATOMIC); + + if (!reclaim_entry) { + FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, + "Failed to allocate memory for reclaim struct for oxid idx: %d\n", + oxid_idx); + + /* Retry the scheduling */ + WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free)); + schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0); + return; + } + + reclaim_entry->oxid_idx = oxid_idx; + reclaim_entry->expires = round_jiffies(jiffies + delay_j); + + list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); + + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); +} + +/** + * fdls_schedule_oxid_free_retry_work - Work handler that schedules the + * oxid to be freed later + * + * @work: Handle to the work struct + */ +void fdls_schedule_oxid_free_retry_work(struct work_struct *work) +{ + struct fnic_oxid_pool_s *oxid_pool = container_of(work, + struct fnic_oxid_pool_s, schedule_oxid_free_retry.work); + struct fnic_iport_s *iport = container_of(oxid_pool, + struct fnic_iport_s, oxid_pool); + struct fnic *fnic = iport->fnic; + struct reclaim_entry_s *reclaim_entry; + unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); + unsigned long flags; + int idx; + + for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) { + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Schedule oxid free. oxid idx: %d\n", idx); + + reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL); + if (!reclaim_entry) { + schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, + msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME)); + return; + } + + clear_bit(idx, oxid_pool->pending_schedule_free); + reclaim_entry->oxid_idx = idx; + reclaim_entry->expires = round_jiffies(jiffies + delay_j); + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); + } +} + +static bool fdls_is_oxid_fabric_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + case FNIC_FRAME_TYPE_FABRIC_RPN: + case FNIC_FRAME_TYPE_FABRIC_RFT: + case FNIC_FRAME_TYPE_FABRIC_RFF: + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + case FNIC_FRAME_TYPE_FABRIC_LOGO: + break; + default: + return false; + } + return true; +} + +static bool fdls_is_oxid_fdmi_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FDMI_PLOGI: + case FNIC_FRAME_TYPE_FDMI_RHBA: + case FNIC_FRAME_TYPE_FDMI_RPA: + break; + default: + return false; + } + return true; +} + +static bool fdls_is_oxid_tgt_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_TGT_PLOGI: + case FNIC_FRAME_TYPE_TGT_PRLI: + case FNIC_FRAME_TYPE_TGT_ADISC: + case FNIC_FRAME_TYPE_TGT_LOGO: + break; + default: + return false; + } + return true; +} + +static void fdls_reset_oxid_pool(struct fnic_iport_s *iport) +{ + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + + oxid_pool->next_idx = 0; +} + +void fnic_del_fabric_timer_sync(struct fnic *fnic) +{ + fnic->iport.fabric.del_timer_inprogress = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + 
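+ /* fnic_lock is dropped around del_timer_sync() because the timer + * callback can itself take fnic_lock; waiting for it to finish while + * holding the lock would risk a deadlock. del_timer_inprogress marks + * the teardown window for other contexts while the lock is released. + */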
del_timer_sync(&fnic->iport.fabric.retry_timer); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic->iport.fabric.del_timer_inprogress = 0; +} + +void fnic_del_tport_timer_sync(struct fnic *fnic, + struct fnic_tport_s *tport) +{ + tport->del_timer_inprogress = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + del_timer_sync(&tport->retry_timer); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + tport->del_timer_inprogress = 0; +} + +static void +fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout) +{ + u64 fabric_tov; + struct fnic *fnic = iport->fnic; + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + iport->fabric.timer_pending = 0; + } + + if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + iport->fabric.retry_counter++; + + fabric_tov = jiffies + msecs_to_jiffies(timeout); + mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov)); + iport->fabric.timer_pending = 1; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fabric timer is %d ", timeout); +} + +static void +fdls_start_tport_timer(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, int timeout) +{ + u64 fabric_tov; + struct fnic *fnic = iport->fnic; + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) + tport->retry_counter++; + + fabric_tov = jiffies + msecs_to_jiffies(timeout); + mod_timer(&tport->retry_timer, round_jiffies(fabric_tov)); + tport->timer_pending = 1; +} + +void fdls_init_plogi_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_flogi *pplogi; + uint8_t s_id[3]; + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pplogi = (struct fc_std_flogi) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC}, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els = { + .fl_cmd = ELS_PLOGI, + .fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, + .sp_lo_ver = FNIC_FC_PH_VER_LO, + .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), + .sp_features = cpu_to_be16(FC_SP_FT_CIRO), + .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ), + .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS), + .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO), + .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)}, + .fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ), + .fl_cssp[2].cp_rdfs = cpu_to_be16(0x800), + .fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF), + .fl_cssp[2].cp_open_seq = 1} + }; + + FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn); + FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size); + + hton24(s_id, iport->fcid); + FNIC_STD_SET_S_ID(pplogi->fchdr, s_id); +} + +static void fdls_init_els_acc_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_els_acc_rsp *pels_acc; + uint8_t s_id[3]; + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pels_acc = (struct fc_std_els_acc_rsp) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}}, + .acc.la_cmd = ELS_LS_ACC, + }; + + hton24(s_id, iport->fcid); + 
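+ /* FC_IDs are 24-bit values: hton24() packs the host-order fcid into + * a 3-byte big-endian buffer, which is then copied into the S_ID + * field of the FC header. + */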
FNIC_STD_SET_S_ID(pels_acc->fchdr, s_id); + FNIC_STD_SET_RX_ID(pels_acc->fchdr, FNIC_UNASSIGNED_RXID); +} + +static void fdls_init_els_rjt_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_els_rjt_rsp *pels_rjt; + + pels_rjt = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pels_rjt = (struct fc_std_els_rjt_rsp) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}}, + .rej.er_cmd = ELS_LS_RJT, + }; + + FNIC_STD_SET_RX_ID(pels_rjt->fchdr, FNIC_UNASSIGNED_RXID); +} + +static void fdls_init_logo_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_logo *plogo; + uint8_t s_id[3]; + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *plogo = (struct fc_std_logo) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}}, + .els.fl_cmd = ELS_LOGO, + }; + + hton24(s_id, iport->fcid); + FNIC_STD_SET_S_ID(plogo->fchdr, s_id); + memcpy(plogo->els.fl_n_port_id, s_id, 3); + + FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn, + iport->wwpn); +} + +static void fdls_init_fabric_abts_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_frame_header *pfabric_abts; + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pfabric_abts = (struct fc_frame_header) { + .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */ + .fh_s_id = {0x00, 0x00, 0x00}, + .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS, + .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00, + .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID), + .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort an exchange */ + }; +} + +static void +fdls_send_rscn_resp(struct fnic_iport_s *iport, + struct fc_frame_header *rscn_fchdr) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *pels_acc; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_acc_rsp); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RSCN response"); + return; + } + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(pels_acc->fchdr, rscn_fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(rscn_fchdr); + FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RSCN response with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_send_logo_resp(struct fnic_iport_s *iport, + struct fc_frame_header *req_fchdr) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *plogo_resp; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_acc_rsp); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send LOGO response"); + return; + } + + plogo_resp = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(plogo_resp->fchdr, req_fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(req_fchdr); + FNIC_STD_SET_OX_ID(plogo_resp->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send LOGO response with oxid: 0x%x", + 
iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +void +fdls_send_tport_abts(struct fnic_iport_s *iport, + struct fnic_tport_s *tport) +{ + uint8_t *frame; + uint8_t s_id[3]; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *ptport_abts; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send tport ABTS"); + return; + } + + ptport_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *ptport_abts = (struct fc_frame_header) { + .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */ + .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS, + .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00, + .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID), + .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort an exchange */ + }; + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + FNIC_STD_SET_S_ID(*ptport_abts, s_id); + FNIC_STD_SET_D_ID(*ptport_abts, d_id); + tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED; + + FNIC_STD_SET_OX_ID(*ptport_abts, tport->active_oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tport abts: tport->state: %d ", + iport->fcid, tport->state); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov); +} +static void fdls_send_fabric_abts(struct fnic_iport_s *iport) +{ + uint8_t *frame; + uint8_t s_id[3]; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *pfabric_abts; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric ABTS"); + return; + } + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_fabric_abts_frame(frame, iport); + + hton24(s_id, iport->fcid); + + switch (iport->fabric.state) { + case FDLS_STATE_FABRIC_LOGO: + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_FABRIC_FLOGI: + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_FABRIC_PLOGI: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_RPN_ID: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_SCR: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_FCTRL); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_REGISTER_FC4_TYPES: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_REGISTER_FC4_FEATURES: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_GPN_FT: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + default: + return; + } + + oxid = iport->active_oxid_fabric_req; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + + 
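+ /* The ABTS carries the OX_ID of the outstanding fabric request + * (active_oxid_fabric_req) instead of a newly allocated one, so the + * abort can be matched to the exchange it is aborting. + */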
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric abts. iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, oxid); + + iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED; + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); + iport->fabric.timer_pending = 1; +} + +static uint8_t *fdls_alloc_init_fdmi_abts_frame(struct fnic_iport_s *iport, + uint16_t oxid) +{ + struct fc_frame_header *pfdmi_abts; + uint8_t d_id[3]; + uint8_t *frame; + struct fnic *fnic = iport->fnic; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI ABTS"); + return NULL; + } + + pfdmi_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_fabric_abts_frame(frame, iport); + + hton24(d_id, FC_FID_MGMT_SERV); + FNIC_STD_SET_D_ID(*pfdmi_abts, d_id); + FNIC_STD_SET_OX_ID(*pfdmi_abts, oxid); + + return frame; +} + +static void fdls_send_fdmi_abts(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fnic *fnic = iport->fnic; + unsigned long fdmi_tov; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) { + frame = fdls_alloc_init_fdmi_abts_frame(iport, + iport->active_oxid_fdmi_plogi); + if (frame == NULL) + return; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI PLOGI abts. iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_plogi); + fnic_send_fcoe_frame(iport, frame, frame_size); + } else { + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) { + frame = fdls_alloc_init_fdmi_abts_frame(iport, + iport->active_oxid_fdmi_rhba); + if (frame == NULL) + return; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RHBA abts. iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rhba); + fnic_send_fcoe_frame(iport, frame, frame_size); + } + if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) { + frame = fdls_alloc_init_fdmi_abts_frame(iport, + iport->active_oxid_fdmi_rpa); + if (frame == NULL) { + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) + goto arm_timer; + else + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RPA abts. 
iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rpa); + fnic_send_fcoe_frame(iport, frame, frame_size); + } + } + +arm_timer: + fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov); + mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov)); + iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); +} + +static void fdls_send_fabric_flogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pflogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FLOGI"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pflogi = (struct fc_std_flogi) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE}, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els.fl_cmd = ELS_FLOGI, + .els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, + .sp_lo_ver = FNIC_FC_PH_VER_LO, + .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), + .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)}, + .els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) + }; + + FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn); + FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size); + FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov); + FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_flogi_sent); +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_fabric_plogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send PLOGI"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send fabric PLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + 
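+ /* No free OX_ID: return the frame to the pool and only mark the + * request for retry; the fabric timer armed at err_out resends the + * PLOGI after 2 * e_d_tov. + */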
iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_plogi_sent); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + uint8_t d_id[3]; + u64 fdmi_tov; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI PLOGI"); + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_PLOGI, + &iport->active_oxid_fdmi_plogi); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI PLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + hton24(d_id, FC_FID_MGMT_SERV); + FNIC_STD_SET_D_ID(pplogi->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI PLOGI with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov); + mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov)); + iport->fabric.fdmi_pending = FDLS_FDMI_PLOGI_PENDING; +} + +static void fdls_send_rpn_id(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rpn_id *prpn_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rpn_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RPN_ID"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prpn_id = (struct fc_std_rpn_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid); + + FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid); + FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RPN_ID", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid, + oxid); + + 
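+ /* RPN_ID is a name server CT request (d_id 0xFF.FF.FC) registering + * this port's WWPN against its FC_ID with the fabric name server. + */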
fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_scr(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_scr *pscr; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_scr); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send SCR"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pscr = (struct fc_std_scr) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, + .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .scr = {.scr_cmd = ELS_SCR, + .scr_reg_func = ELS_SCRF_FULL} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(pscr->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send SCR", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pscr->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_scr_sent); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state) +{ + uint8_t *frame; + struct fc_std_gpn_ft *pgpn_ft; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_gpn_ft); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send GPN FT"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pgpn_ft = (struct fc_std_gpn_ft) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)}, + .gpn_ft.fn_fc4_type = 0x08 + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send GPN FT", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if 
fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); + fdls_set_state((&iport->fabric), fdls_state); +} + +static void +fdls_send_tgt_adisc(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_els_adisc *padisc; + uint8_t s_id[3]; + uint8_t d_id[3]; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_adisc); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT ADISC"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + padisc = (struct fc_std_els_adisc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + memcpy(padisc->els.adisc_port_id, s_id, 3); + FNIC_STD_SET_S_ID(padisc->fchdr, s_id); + FNIC_STD_SET_D_ID(padisc->fchdr, d_id); + + FNIC_STD_SET_F_CTL(padisc->fchdr, FNIC_ELS_REQ_FCTL << 16); + FNIC_STD_SET_R_CTL(padisc->fchdr, FC_RCTL_ELS_REQ); + FNIC_STD_SET_TYPE(padisc->fchdr, FC_TYPE_ELS); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_ADISC, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send TGT ADISC", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(padisc->fchdr, oxid); + FNIC_STD_SET_RX_ID(padisc->fchdr, FNIC_UNASSIGNED_RXID); + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + FNIC_STD_SET_NPORT_NAME(&padisc->els.adisc_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&padisc->els.adisc_wwnn, + iport->wwnn); + + padisc->els.adisc_cmd = ELS_ADISC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send ADISC to tgt fcid: 0x%x", + iport->fcid, tport->fcid); + + atomic64_inc(&iport->iport_stats.tport_adisc_sent); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov); +} + +bool fdls_delete_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + struct fnic_tport_event_s *tport_del_evt; + struct fnic *fnic = iport->fnic; + + if ((tport->state == FDLS_TGT_STATE_OFFLINING) + || (tport->state == FDLS_TGT_STATE_OFFLINE)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: tport state is offlining/offline\n", + tport->fcid); + return false; + } + + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); + /* + * By setting this flag, the tport will not be seen in a look-up + * in an RSCN. 
Even if we move to a multithreaded model, this tport + * will be destroyed and a new RSCN will have to create a new one. + */ + tport->flags |= FNIC_FDLS_TPORT_TERMINATING; + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_rport_exch_reset(iport->fnic, tport->fcid); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + if (tport->flags & FNIC_FDLS_SCSI_REGISTERED) { + tport_del_evt = + kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_del_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for tport fcid: 0x%0x\n", + tport->fcid); + return false; + } + tport_del_evt->event = TGT_EV_RPORT_DEL; + tport_del_evt->arg1 = (void *) tport; + list_add_tail(&tport_del_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport 0x%x not reg with scsi_transport. Freeing locally", + tport->fcid); + list_del(&tport->links); + kfree(tport); + } + return true; +} + +static void +fdls_send_tgt_plogi(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + uint8_t d_id[3]; + uint32_t timeout; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT PLOGI"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PLOGI, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate oxid to send PLOGI to fcid: 0x%x", + iport->fcid, tport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + hton24(d_id, tport->fcid); + FNIC_STD_SET_D_ID(pplogi->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tgt PLOGI to tgt: 0x%x with oxid: 0x%x", + iport->fcid, tport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.tport_plogi_sent); + +err_out: + timeout = max(2 * iport->e_d_tov, iport->plogi_timeout); + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, timeout); +} + +static uint16_t +fnic_fc_plogi_rsp_rdf(struct fnic_iport_s *iport, + struct fc_std_flogi *plogi_rsp) +{ + uint16_t b2b_rdf_size = + be16_to_cpu(FNIC_LOGI_RDF_SIZE(plogi_rsp->els)); + uint16_t spc3_rdf_size = + be16_to_cpu(plogi_rsp->els.fl_cssp[2].cp_rdfs) & FNIC_FC_C3_RDF; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFS: b2b_rdf_size: 0x%x spc3_rdf_size: 0x%x", + b2b_rdf_size, spc3_rdf_size); + + return min(b2b_rdf_size, spc3_rdf_size); +} + +static void fdls_send_register_fc4_types(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rft_id *prft_id; + struct fnic *fnic 
= iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rft_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RFT"); + return; + } + + prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prft_id = (struct fc_std_rft_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prft_id->fchdr, fcid); + FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RFT", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid, + oxid); + + prft_id->rft_id.fr_fts.ff_type_map[0] = + cpu_to_be32(1 << FC_TYPE_FCP); + + prft_id->rft_id.fr_fts.ff_type_map[1] = + cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW)); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_register_fc4_features(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rff_id *prff_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rff_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RFF"); + return; + } + + prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prff_id = (struct fc_std_rff_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)}, + .rff_id.fr_feat = 0x2, + .rff_id.fr_type = FC_TYPE_FCP + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prff_id->fchdr, fcid); + FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RFF", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid, + oxid); + + prff_id->rff_id.fr_type = FC_TYPE_FCP; + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void +fdls_send_tgt_prli(struct fnic_iport_s *iport, 
struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_els_prli *pprli; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_prli); + uint8_t s_id[3]; + uint8_t d_id[3]; + uint32_t timeout; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT PRLI"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pprli = (struct fc_std_els_prli *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pprli = (struct fc_std_els_prli) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els_prli = {.prli_cmd = ELS_PRLI, + .prli_spp_len = 16, + .prli_len = cpu_to_be16(0x14)}, + .sp = {.spp_type = 0x08, .spp_flags = 0x0020, + .spp_params = cpu_to_be32(0xA2)} + }; + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PRLI, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send TGT PRLI to 0x%x", + iport->fcid, tport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + + FNIC_STD_SET_OX_ID(pprli->fchdr, oxid); + FNIC_STD_SET_S_ID(pprli->fchdr, s_id); + FNIC_STD_SET_D_ID(pprli->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send PRLI to tgt: 0x%x with oxid: 0x%x", + iport->fcid, tport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.tport_prli_sent); + +err_out: + timeout = max(2 * iport->e_d_tov, iport->plogi_timeout); + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, timeout); +} + +/** + * fdls_send_fabric_logo - Send a fabric LOGO to the FCF + * @iport: Handle to fnic iport + * + * This function does not change or check the fabric state. + * It is the caller's responsibility to set the appropriate iport fabric + * state when this is called. Normally it is FDLS_STATE_FABRIC_LOGO. + * This currently assumes it is called with the fnic lock held. 
+ */ +void fdls_send_fabric_logo(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_logo *plogo; + struct fnic *fnic = iport->fnic; + uint8_t d_id[3]; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_logo); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric LOGO"); + return; + } + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_logo_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send fabric LOGO", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(plogo->fchdr, oxid); + + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(plogo->fchdr, d_id); + + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric LOGO with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +/** + * fdls_tgt_logout - Send plogo to the remote port + * @iport: Handle to fnic iport + * @tport: Handle to remote port + * + * This function does not change or check the fabric/tport state. + * It is the caller's responsibility to set the appropriate tport/fabric + * state when this is called. Normally that is fdls_tgt_state_plogo. + * This could also be used to send a plogo to the nameserver process, + * not just to target processes + */ +void fdls_tgt_logout(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_logo *plogo; + struct fnic *fnic = iport->fnic; + uint8_t d_id[3]; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_logo); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send tgt LOGO"); + return; + } + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_logo_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_LOGO, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send tgt LOGO", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(plogo->fchdr, oxid); + + hton24(d_id, tport->fcid); + FNIC_STD_SET_D_ID(plogo->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tgt LOGO with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + atomic64_inc(&iport->iport_stats.tport_logo_sent); +} + +static void fdls_tgt_discovery_start(struct fnic_iport_s *iport) +{ + struct fnic_tport_s *tport, *next; + u32 old_link_down_cnt = iport->fnic->link_down_cnt; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Starting FDLS target discovery", iport->fcid); + + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + if ((old_link_down_cnt != iport->fnic->link_down_cnt) + || (iport->state != FNIC_IPORT_STATE_READY)) { + break; + } + /* If we marked the tport as deleted due to GPN_FT, + * we should not send ADISC anymore + */ + if ((tport->state ==
FDLS_TGT_STATE_OFFLINING) || + (tport->state == FDLS_TGT_STATE_OFFLINE)) + continue; + + /* For tports which have received RSCN */ + if (tport->flags & FNIC_FDLS_TPORT_SEND_ADISC) { + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_ADISC); + tport->flags &= ~FNIC_FDLS_TPORT_SEND_ADISC; + fdls_send_tgt_adisc(iport, tport); + continue; + } + if (fdls_get_tport_state(tport) != FDLS_TGT_STATE_INIT) { + /* Not a new port, skip */ + continue; + } + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + fdls_send_tgt_plogi(iport, tport); + } + fdls_set_state((&iport->fabric), FDLS_STATE_TGT_DISCOVERY); +} + +/* + * Function to restart the IT nexus if we received any out of + * sequence PLOGI/PRLI response from the target. + * The memory for the new tport structure is allocated + * inside fdls_create_tport and added to the iport's tport list. + * This will get freed later during tport_offline/linkdown + * or module unload. The new_tport pointer will go out of scope + * safely since the memory it is + * pointing to will be freed later + */ +static void fdls_target_restart_nexus(struct fnic_tport_s *tport) +{ + struct fnic_iport_s *iport = tport->iport; + struct fnic_tport_s *new_tport = NULL; + uint32_t fcid; + uint64_t wwpn; + int nexus_restart_count; + struct fnic *fnic = iport->fnic; + bool retval = true; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid: 0x%x state: %d restart_count: %d", + tport->fcid, tport->state, tport->nexus_restart_count); + + fcid = tport->fcid; + wwpn = tport->wwpn; + nexus_restart_count = tport->nexus_restart_count; + + retval = fdls_delete_tport(iport, tport); + if (retval != true) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Error deleting tport: 0x%x", fcid); + return; + } + + if (nexus_restart_count >= FNIC_TPORT_MAX_NEXUS_RESTART) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded nexus restart retries tport: 0x%x", + fcid); + return; + } + + /* + * Allocate memory for the new tport and add it to + * iport's tport list. + * This memory will be freed during tport_offline/linkdown + * or module unload. The pointer new_tport is safe to go + * out of scope when this function returns, since the memory + * it is pointing to is guaranteed to be freed later + * as mentioned above.
+ */ + new_tport = fdls_create_tport(iport, fcid, wwpn); + if (!new_tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Error creating new tport: 0x%x", fcid); + return; + } + + new_tport->nexus_restart_count = nexus_restart_count + 1; + fdls_send_tgt_plogi(iport, new_tport); + fdls_set_tport_state(new_tport, FDLS_TGT_STATE_PLOGI); +} + +struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport, + uint32_t fcid) +{ + struct fnic_tport_s *tport, *next; + + list_for_each_entry_safe(tport, next, &(iport->tport_list), links) { + if ((tport->fcid == fcid) + && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING)) + return tport; + } + return NULL; +} + +static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport, + uint32_t fcid, uint64_t wwpn) +{ + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS create tport: fcid: 0x%x wwpn: 0x%llx", fcid, wwpn); + + tport = kzalloc(sizeof(struct fnic_tport_s), GFP_ATOMIC); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Memory allocation failure while creating tport: 0x%x\n", + fcid); + return NULL; + } + + tport->max_payload_size = FNIC_FCOE_MAX_FRAME_SZ; + tport->r_a_tov = FC_DEF_R_A_TOV; + tport->e_d_tov = FC_DEF_E_D_TOV; + tport->fcid = fcid; + tport->wwpn = wwpn; + tport->iport = iport; + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Need to setup tport timer callback"); + + timer_setup(&tport->retry_timer, fdls_tport_timer_callback, 0); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Added tport 0x%x", tport->fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_INIT); + list_add_tail(&tport->links, &iport->tport_list); + atomic_set(&tport->in_flight, 0); + return tport; +} + +struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport, + uint64_t wwpn) +{ + struct fnic_tport_s *tport, *next; + + list_for_each_entry_safe(tport, next, &(iport->tport_list), links) { + if ((tport->wwpn == wwpn) + && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING)) + return tport; + } + return NULL; +} + +static void +fnic_fdmi_attr_set(void *attr_start, u16 type, u16 len, + void *data, u32 *off) +{ + u16 size = len + FC_FDMI_ATTR_ENTRY_HEADER_LEN; + struct fc_fdmi_attr_entry *fdmi_attr = (struct fc_fdmi_attr_entry *) + ((u8 *)attr_start + *off); + + put_unaligned_be16(type, &fdmi_attr->type); + put_unaligned_be16(size, &fdmi_attr->len); + memcpy(fdmi_attr->value, data, len); + *off += size; +} + +static void fdls_fdmi_register_hba(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_fdmi_rhba *prhba; + struct fc_fdmi_attr_entry *fdmi_attr; + uint8_t fcid[3]; + int err; + struct fnic *fnic = iport->fnic; + struct vnic_devcmd_fw_info *fw_info = NULL; + uint16_t oxid; + u32 attr_off_bytes, len; + u8 data[64]; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI RHBA"); + return; + } + + prhba = (struct fc_std_fdmi_rhba *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prhba = (struct fc_std_fdmi_rhba) { + .fchdr = { + .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0XFF, 0XFA}, + .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID) + }, + .fc_std_ct_hdr = { + .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT, + .ct_fs_subtype = FC_FDMI_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_FDMI_RHBA) + }, + }; + 
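+ /* + * 0xFFFFFA is the FC well-known Management Server address, which + * hosts the FDMI service; the name server registrations above are + * addressed to 0xFFFFFC, the Directory Server. The RHBA attributes + * are appended past this fixed payload by fnic_fdmi_attr_set(), + * which advances attr_off_bytes; frame_size grows by the same + * amount further below. + */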
+ hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prhba->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RHBA, + &iport->active_oxid_fdmi_rhba); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI RHBA", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prhba->fchdr, oxid); + + put_unaligned_be64(iport->wwpn, &prhba->rhba.hbaid.id); + put_unaligned_be32(FNIC_FDMI_NUM_PORTS, &prhba->rhba.port.numport); + put_unaligned_be64(iport->wwpn, &prhba->rhba.port.port[0].portname); + put_unaligned_be32(FNIC_FDMI_NUM_HBA_ATTRS, + &prhba->rhba.hba_attrs.numattrs); + + fdmi_attr = prhba->rhba.hba_attrs.attr; + attr_off_bytes = 0; + + put_unaligned_be64(iport->wwnn, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_NODE_NAME, + FNIC_FDMI_NN_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "NN set, off=%d", attr_off_bytes); + + strscpy_pad(data, FNIC_FDMI_MANUFACTURER, FNIC_FDMI_MANU_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MANUFACTURER, + FNIC_FDMI_MANU_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFG set <%s>, off=%d", data, attr_off_bytes); + + err = vnic_dev_fw_info(fnic->vdev, &fw_info); + if (!err) { + strscpy_pad(data, fw_info->hw_serial_number, + FNIC_FDMI_SERIAL_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SERIAL_NUMBER, + FNIC_FDMI_SERIAL_LEN, data, &attr_off_bytes); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SERIAL set <%s>, off=%d", data, attr_off_bytes); + + } + + if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN) + fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1; + strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN, + data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MODEL set <%s>, off=%d", data, attr_off_bytes); + + strscpy_pad(data, FNIC_FDMI_MODEL_DESCRIPTION, FNIC_FDMI_MODEL_DES_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL_DES, + FNIC_FDMI_MODEL_DES_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MODEL_DESC set <%s>, off=%d", data, attr_off_bytes); + + if (!err) { + strscpy_pad(data, fw_info->hw_version, FNIC_FDMI_HW_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HARDWARE_VERSION, + FNIC_FDMI_HW_VER_LEN, data, &attr_off_bytes); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "HW_VER set <%s>, off=%d", data, attr_off_bytes); + + } + + strscpy_pad(data, DRV_VERSION, FNIC_FDMI_DR_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_DRIVER_VERSION, + FNIC_FDMI_DR_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "DRV_VER set <%s>, off=%d", data, attr_off_bytes); + + strscpy_pad(data, "N/A", FNIC_FDMI_ROM_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_ROM_VERSION, + FNIC_FDMI_ROM_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ROM_VER set <%s>, off=%d", data, attr_off_bytes); + + if (!err) { + strscpy_pad(data, fw_info->fw_version, FNIC_FDMI_FW_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FIRMWARE_VERSION, + FNIC_FDMI_FW_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FW_VER set <%s>, off=%d", data, attr_off_bytes); + } + + len = sizeof(struct fc_std_fdmi_rhba) + attr_off_bytes; + frame_size 
+= len; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RHBA with oxid: 0x%x fs: %d", iport->fcid, + oxid, frame_size); + + fnic_send_fcoe_frame(iport, frame, frame_size); + iport->fabric.fdmi_pending |= FDLS_FDMI_REG_HBA_PENDING; +} + +static void fdls_fdmi_register_pa(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_fdmi_rpa *prpa; + struct fc_fdmi_attr_entry *fdmi_attr; + uint8_t fcid[3]; + struct fnic *fnic = iport->fnic; + u32 port_speed_bm; + u32 port_speed = vnic_dev_port_speed(fnic->vdev); + uint16_t oxid; + u32 attr_off_bytes, len; + u8 tmp_data[16], data[64]; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI RPA"); + return; + } + + prpa = (struct fc_std_fdmi_rpa *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prpa = (struct fc_std_fdmi_rpa) { + .fchdr = { + .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFA}, + .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID) + }, + .fc_std_ct_hdr = { + .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT, + .ct_fs_subtype = FC_FDMI_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_FDMI_RPA) + }, + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prpa->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RPA, + &iport->active_oxid_fdmi_rpa); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI RPA", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prpa->fchdr, oxid); + + put_unaligned_be64(iport->wwpn, &prpa->rpa.port.portname); + put_unaligned_be32(FNIC_FDMI_NUM_PORT_ATTRS, + &prpa->rpa.hba_attrs.numattrs); + + /* MDS does not support GIGE speed. + * Bit shift standard definitions from scsi_transport_fc.h to + * match FC spec. 
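+ * For example, FC_PORTSPEED_10GBIT is shifted by PORT_SPEED_BIT_14 + * so that it lands on the position the FC spec assigns to 10 Gbit; + * 20G, which has no bit of its own, reuses the 10 Gbit position, + * and unknown speeds fall back to the 1 Gbit position + * (PORT_SPEED_BIT_15).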
+ */ + switch (port_speed) { + case DCEM_PORTSPEED_10G: + case DCEM_PORTSPEED_20G: + /* There is no bit for 20G */ + port_speed_bm = FC_PORTSPEED_10GBIT << PORT_SPEED_BIT_14; + break; + case DCEM_PORTSPEED_25G: + port_speed_bm = FC_PORTSPEED_25GBIT << PORT_SPEED_BIT_8; + break; + case DCEM_PORTSPEED_40G: + case DCEM_PORTSPEED_4x10G: + port_speed_bm = FC_PORTSPEED_40GBIT << PORT_SPEED_BIT_9; + break; + case DCEM_PORTSPEED_100G: + port_speed_bm = FC_PORTSPEED_100GBIT << PORT_SPEED_BIT_8; + break; + default: + port_speed_bm = FC_PORTSPEED_1GBIT << PORT_SPEED_BIT_15; + break; + } + attr_off_bytes = 0; + + fdmi_attr = prpa->rpa.hba_attrs.attr; + + put_unaligned_be64(iport->wwnn, data); + + memset(data, 0, FNIC_FDMI_FC4_LEN); + data[2] = 1; + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FC4_TYPES, + FNIC_FDMI_FC4_LEN, data, &attr_off_bytes); + + put_unaligned_be32(port_speed_bm, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SUPPORTED_SPEEDS, + FNIC_FDMI_SUPP_SPEED_LEN, data, &attr_off_bytes); + + put_unaligned_be32(port_speed_bm, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_CURRENT_SPEED, + FNIC_FDMI_CUR_SPEED_LEN, data, &attr_off_bytes); + + put_unaligned_be32(FNIC_FDMI_MFS, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MAX_FRAME_SIZE, + FNIC_FDMI_MFS_LEN, data, &attr_off_bytes); + + snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d", + fnic->host->host_no); + strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME, + FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "OS name set <%s>, off=%d", data, attr_off_bytes); + + sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename); + strscpy_pad(data, fc_host_system_hostname(fnic->host), + FNIC_FDMI_HN_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME, + FNIC_FDMI_HN_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Host name set <%s>, off=%d", data, attr_off_bytes); + + len = sizeof(struct fc_std_fdmi_rpa) + attr_off_bytes; + frame_size += len; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RPA with oxid: 0x%x fs: %d", iport->fcid, + oxid, frame_size); + + fnic_send_fcoe_frame(iport, frame, frame_size); + iport->fabric.fdmi_pending |= FDLS_FDMI_RPA_PENDING; +} + +void fdls_fabric_timer_callback(struct timer_list *t) +{ + struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer); + struct fnic_iport_s *iport = + container_of(fabric, struct fnic_iport_s, fabric); + struct fnic *fnic = iport->fnic; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tp: %d fab state: %d fab retry counter: %d max_flogi_retries: %d", + iport->fabric.timer_pending, iport->fabric.state, + iport->fabric.retry_counter, iport->max_flogi_retries); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (!iport->fabric.timer_pending) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + if (iport->fabric.del_timer_inprogress) { + iport->fabric.del_timer_inprogress = 0; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fabric_del_timer inprogress(%d). 
Skip timer cb", + iport->fabric.del_timer_inprogress); + return; + } + + iport->fabric.timer_pending = 0; + + /* The fabric state indicates which frames have time out, and we retry */ + switch (iport->fabric.state) { + case FDLS_STATE_FABRIC_FLOGI: + /* Flogi received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < iport->max_flogi_retries)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_fabric_flogi(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* Flogi has time out 2*ed_tov send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out + * Mark the OXID to be freed after 2 * r_a_tov and retry the req + */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + if (iport->fabric.retry_counter < iport->max_flogi_retries) { + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + fdls_send_fabric_flogi(iport); + } else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max FLOGI retries"); + } + break; + case FDLS_STATE_FABRIC_PLOGI: + /* Plogi received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < iport->max_plogi_retries)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_fabric_plogi(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* Plogi has timed out 2*ed_tov send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out + * Mark the OXID to be freed after 2 * r_a_tov and retry the req + */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + if (iport->fabric.retry_counter < iport->max_plogi_retries) { + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + fdls_send_fabric_plogi(iport); + } else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max PLOGI retries"); + } + break; + case FDLS_STATE_RPN_ID: + /* Rpn_id received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_rpn_id(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + /* RPN has timed out. Send abts */ + fdls_send_fabric_abts(iport); + else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FDLS_STATE_SCR: + /* scr received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_scr(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + /* scr has timed out. Send abts */ + fdls_send_fabric_abts(iport); + else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timed out. 
Starting PLOGI: %p", iport); + fnic_fdls_start_plogi(iport); + } + break; + case FDLS_STATE_REGISTER_FC4_TYPES: + /* scr received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_register_fc4_types(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* RFT_ID timed out send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timed out. Starting PLOGI: %p", iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FDLS_STATE_REGISTER_FC4_FEATURES: + /* scr received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_register_fc4_features(iport); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + /* SCR has timed out. Send abts */ + fdls_send_fabric_abts(iport); + else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timed out. Starting PLOGI %p", iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FDLS_STATE_RSCN_GPN_FT: + case FDLS_STATE_SEND_GPNFT: + case FDLS_STATE_GPN_FT: + /* GPN_FT received a LS_RJT with busy we retry from here */ + if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME) + && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) { + iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_gpn_ft(iport, iport->fabric.state); + } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + /* gpn_ft has timed out. Send abts */ + fdls_send_fabric_abts(iport); + } else { + /* ABTS has timed out */ + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) { + fdls_send_gpn_ft(iport, iport->fabric.state); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ABTS timeout for fabric GPN_FT. Check name server: %p", + iport); + } + } + break; + default: + break; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + + iport->fabric.fdmi_pending = 0; + /* If max retries not exhausted, start over from fdmi plogi */ + if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) { + iport->fabric.fdmi_retry++; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Retry FDMI PLOGI. FDMI retry: %d", + iport->fabric.fdmi_retry); + fdls_send_fdmi_plogi(iport); + } +} + +void fdls_fdmi_timer_callback(struct timer_list *t) +{ + struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, fdmi_timer); + struct fnic_iport_s *iport = + container_of(fabric, struct fnic_iport_s, fabric); + struct fnic *fnic = iport->fnic; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); + + if (!iport->fabric.fdmi_pending) { + /* timer expired after fdmi responses received. 
*/ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); + + /* if not abort pending, send an abort */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) { + fdls_send_fdmi_abts(iport); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); + + /* ABTS pending for an active fdmi request that is pending. + * That means FDMI ABTS timed out + * Schedule to free the OXID after 2*r_a_tov and proceed + */ + if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDMI PLOGI ABTS timed out. Schedule oxid free: 0x%x\n", + iport->active_oxid_fdmi_plogi); + fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi); + } else { + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDMI RHBA ABTS timed out. Schedule oxid free: 0x%x\n", + iport->active_oxid_fdmi_rhba); + fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba); + } + if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDMI RPA ABTS timed out. Schedule oxid free: 0x%x\n", + iport->active_oxid_fdmi_rpa); + fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa); + } + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); + + fdls_fdmi_retry_plogi(iport); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +static void fdls_send_delete_tport_msg(struct fnic_tport_s *tport) +{ + struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport; + struct fnic *fnic = iport->fnic; + struct fnic_tport_event_s *tport_del_evt; + + tport_del_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_del_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for tport event fcid: 0x%x", + tport->fcid); + return; + } + tport_del_evt->event = TGT_EV_TPORT_DELETE; + tport_del_evt->arg1 = (void *) tport; + list_add_tail(&tport_del_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); +} + +static void fdls_tport_timer_callback(struct timer_list *t) +{ + struct fnic_tport_s *tport = from_timer(tport, t, retry_timer); + struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (!tport->timer_pending) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + if (iport->state != FNIC_IPORT_STATE_READY) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + if (tport->del_timer_inprogress) { + tport->del_timer_inprogress = 0; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport_del_timer inprogress. 
Skip timer cb tport fcid: 0x%x\n", + tport->fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid: 0x%x timer pending: %d state: %d retry counter: %d", + tport->fcid, tport->timer_pending, tport->state, + tport->retry_counter); + + tport->timer_pending = 0; + oxid = tport->active_oxid; + + /* We retry plogi/prli/adisc frames depending on the tport state */ + switch (tport->state) { + case FDLS_TGT_STATE_PLOGI: + /* PLOGI frame received a LS_RJT with busy, we retry from here */ + if ((tport->flags & FNIC_FDLS_RETRY_FRAME) + && (tport->retry_counter < iport->max_plogi_retries)) { + tport->flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_tgt_plogi(iport, tport); + } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + /* Plogi frame has timed out, send abts */ + fdls_send_tport_abts(iport, tport); + } else if (tport->retry_counter < iport->max_plogi_retries) { + /* + * ABTS has timed out + */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); + } else { + /* exceeded plogi retry count */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_delete_tport_msg(tport); + } + break; + case FDLS_TGT_STATE_PRLI: + /* PRLI received a LS_RJT with busy , hence we retry from here */ + if ((tport->flags & FNIC_FDLS_RETRY_FRAME) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + tport->flags &= ~FNIC_FDLS_RETRY_FRAME; + fdls_send_tgt_prli(iport, tport); + } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + /* PRLI has time out, send abts */ + fdls_send_tport_abts(iport, tport); + } else { + /* ABTS has timed out for prli, we go back to PLOGI */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + } + break; + case FDLS_TGT_STATE_ADISC: + /* ADISC timed out send an ABTS */ + if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + fdls_send_tport_abts(iport, tport); + } else if ((tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + /* + * ABTS has timed out + */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + fdls_send_tgt_adisc(iport, tport); + } else { + /* exceeded retry count */ + fdls_schedule_oxid_free(iport, &tport->active_oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC not responding. 
Deleting target port: 0x%x", + tport->fcid); + fdls_send_delete_tport_msg(tport); + } + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "oxid: 0x%x Unknown tport state: 0x%x", oxid, tport->state); + break; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +static void fnic_fdls_start_flogi(struct fnic_iport_s *iport) +{ + iport->fabric.retry_counter = 0; + fdls_send_fabric_flogi(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_FLOGI); + iport->fabric.flags = 0; +} + +static void fnic_fdls_start_plogi(struct fnic_iport_s *iport) +{ + iport->fabric.retry_counter = 0; + fdls_send_fabric_plogi(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_PLOGI); + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + if ((fnic_fdmi_support == 1) && (!(iport->flags & FNIC_FDMI_ACTIVE))) { + /* we can do FDMI at the same time */ + iport->fabric.fdmi_retry = 0; + timer_setup(&iport->fabric.fdmi_timer, fdls_fdmi_timer_callback, + 0); + fdls_send_fdmi_plogi(iport); + iport->flags |= FNIC_FDMI_ACTIVE; + } +} +static void +fdls_process_tgt_adisc_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint64_t frame_wwnn; + uint64_t frame_wwpn; + uint16_t oxid; + struct fc_std_els_adisc *adisc_rsp = (struct fc_std_els_adisc *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic *fnic = iport->fnic; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Tgt ADISC response tport not found: 0x%x", tgt_fcid); + return; + } + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->state != FDLS_TGT_STATE_ADISC) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping this ADISC response"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport state: %d tport state: %d Is abort issued on PRLI? %d", + iport->state, tport->state, + (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)); + return; + } + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame from target: 0x%x", + tgt_fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reason: Stale ADISC/Aborted ADISC/OOO frame delivery"); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (adisc_rsp->els.adisc_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_adisc_ls_accepts); + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport 0x%p Canceling fabric disc timer\n", + tport); + fnic_del_tport_timer_sync(fnic, tport); + } + tport->timer_pending = 0; + tport->retry_counter = 0; + frame_wwnn = get_unaligned_be64(&adisc_rsp->els.adisc_wwnn); + frame_wwpn = get_unaligned_be64(&adisc_rsp->els.adisc_wwpn); + if ((frame_wwnn == tport->wwnn) && (frame_wwpn == tport->wwpn)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC accepted from target: 0x%x. 
Target logged in", + tgt_fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_READY); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Error mismatch frame: ADISC"); + } + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_adisc_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + + /* Retry ADISC again from the timer routine. */ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + fdls_delete_tport(iport, tport); + } + break; + } +} +static void +fdls_process_tgt_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint16_t oxid; + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + uint16_t max_payload_size; + struct fnic *fnic = iport->fnic; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS processing target PLOGI response: tgt_fcid: 0x%x", + tgt_fcid); + + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport not found: 0x%x", tgt_fcid); + return; + } + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame! iport state: %d tport state: %d", + iport->state, tport->state); + return; + } + + if (tport->state != FDLS_TGT_STATE_PLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI rsp recvd in wrong state. Drop the frame and restart nexus"); + fdls_target_restart_nexus(tport); + return; + } + + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI response from target: 0x%x. Dropping frame", + tgt_fcid); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_plogi_ls_accepts); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI accepted by target: 0x%x", tgt_fcid); + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_plogi_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < iport->max_plogi_retries)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + /* Retry plogi again from the timer routine. 
*/ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + fdls_delete_tport(iport, tport); + return; + + default: + atomic64_inc(&iport->iport_stats.tport_plogi_misc_rejects); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI not accepted from target fcid: 0x%x", + tgt_fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Found the PLOGI target: 0x%x and state: %d", + (unsigned int) tgt_fcid, tport->state); + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + } + + tport->timer_pending = 0; + tport->wwpn = get_unaligned_be64(&FNIC_LOGI_PORT_NAME(plogi_rsp->els)); + tport->wwnn = get_unaligned_be64(&FNIC_LOGI_NODE_NAME(plogi_rsp->els)); + + /* Learn the Service Params */ + + /* Max frame size - choose the lowest */ + max_payload_size = fnic_fc_plogi_rsp_rdf(iport, plogi_rsp); + tport->max_payload_size = + min(max_payload_size, iport->max_payload_size); + + if (tport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFS: tport max frame size below spec bounds: %d", + tport->max_payload_size); + tport->max_payload_size = FNIC_MIN_DATA_FIELD_SIZE; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MAX frame size: %u iport max_payload_size: %d tport mfs: %d", + max_payload_size, iport->max_payload_size, + tport->max_payload_size); + + tport->max_concur_seqs = FNIC_FC_PLOGI_RSP_CONCUR_SEQ(plogi_rsp); + + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_PRLI); + fdls_send_tgt_prli(iport, tport); +} +static void +fdls_process_tgt_prli_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint16_t oxid; + struct fc_std_els_prli *prli_rsp = (struct fc_std_els_prli *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic_tport_event_s *tport_add_evt; + struct fnic *fnic = iport->fnic; + bool mismatched_tgt = false; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process tgt PRLI response: 0x%x", tgt_fcid); + + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport not found: 0x%x", tgt_fcid); + /* Handle or just drop? */ + return; + } + + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame! iport st: %d tport st: %d tport fcid: 0x%x", + iport->state, tport->state, tport->fcid); + return; + } + + if (tport->state != FDLS_TGT_STATE_PRLI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI rsp recvd in wrong state. Drop frame. 
Restarting nexus"); + fdls_target_restart_nexus(tport); + return; + } + + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping PRLI response from target: 0x%x ", + tgt_fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reason: Stale PRLI response/Aborted PDISC/OOO frame delivery"); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (prli_rsp->els_prli.prli_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_prli_ls_accepts); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI accepted from target: 0x%x", tgt_fcid); + + if (prli_rsp->sp.spp_type != FC_FC4_TYPE_SCSI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "mismatched target zoned with FC SCSI initiator: 0x%x", + tgt_fcid); + mismatched_tgt = true; + } + if (mismatched_tgt) { + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + } + break; + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_prli_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + + /*Retry Plogi again from the timer routine. */ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + default: + atomic64_inc(&iport->iport_stats.tport_prli_misc_rejects); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI not accepted from target: 0x%x", tgt_fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Found the PRLI target: 0x%x and state: %d", + (unsigned int) tgt_fcid, tport->state); + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + } + tport->timer_pending = 0; + + /* Learn Service Params */ + tport->fcp_csp = be32_to_cpu(prli_rsp->sp.spp_params); + tport->retry_counter = 0; + + if (tport->fcp_csp & FCP_SPPF_RETRY) + tport->tgt_flags |= FNIC_FC_RP_FLAGS_RETRY; + + /* Check if the device plays Target Mode Function */ + if (!(tport->fcp_csp & FCP_PRLI_FUNC_TARGET)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remote port(0x%x): no target support. 
Deleting it\n", + tgt_fcid); + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + } + + fdls_set_tport_state(tport, FDLS_TGT_STATE_READY); + + /* Inform the driver about new target added */ + tport_add_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_add_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport event memory allocation failure: 0x%0x\n", + tport->fcid); + return; + } + tport_add_evt->event = TGT_EV_RPORT_ADD; + tport_add_evt->arg1 = (void *) tport; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x add tport event fcid: 0x%x\n", + tport->fcid, iport->fcid); + list_add_tail(&tport_add_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); +} + + +static void +fdls_process_rff_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic *fnic = iport->fnic; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rff_id *rff_rsp = (struct fc_std_rff_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_FEATURES) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + rsp = FNIC_STD_GET_FC_CT_CMD((&rff_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RFF ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_set_state((&iport->fabric), FDLS_STATE_SCR); + fdls_send_scr(iport); + break; + case FC_FS_RJT: + reason_code = rff_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID ret ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID returned ELS_LS_RJT. Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_rft_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rft_id *rft_rsp = (struct fc_std_rft_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_TYPES) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFT_ID resp recvd in state(%d). 
Dropping.", + fdls_get_state(fdls)); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + + rsp = FNIC_STD_GET_FC_CT_CMD((&rft_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RFT ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_send_register_fc4_features(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_FEATURES); + break; + case FC_FS_RJT: + reason_code = rft_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: RFT_ID ret ELS_LS_RJT BUSY. Retry from timer routine", + iport->fcid); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: RFT_ID REJ. Halting discovery reason %d expl %d", + iport->fcid, reason_code, + rft_rsp->fc_std_ct_hdr.ct_explan); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_rpn_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rpn_id *rpn_rsp = (struct fc_std_rpn_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_RPN_ID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + rsp = FNIC_STD_GET_FC_CT_CMD((&rpn_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RPN ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_send_register_fc4_types(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_TYPES); + break; + case FC_FS_RJT: + reason_code = rpn_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID returned REJ BUSY. Retry from timer routine %p", + iport); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID ELS_LS_RJT. Halting discovery %p", iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_scr_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_scr *scr_rsp = (struct fc_std_scr *) fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process SCR response: 0x%04x", + (uint32_t) scr_rsp->scr.scr_cmd); + + if (fdls_get_state(fdls) != FDLS_STATE_SCR) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (scr_rsp->scr.scr_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_scr_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_send_gpn_ft(iport, FDLS_STATE_GPN_FT); + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_scr_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR returned ELS_LS_RJT. 
Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + + default: + atomic64_inc(&iport->iport_stats.fabric_scr_misc_rejects); + break; + } +} + +static void +fdls_process_gpn_ft_tgt_list(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, int len) +{ + struct fc_gpn_ft_rsp_iu *gpn_ft_tgt; + struct fnic_tport_s *tport, *next; + uint32_t fcid; + uint64_t wwpn; + int rem_len = len; + u32 old_link_down_cnt = iport->fnic->link_down_cnt; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process GPN_FT tgt list", iport->fcid); + + gpn_ft_tgt = + (struct fc_gpn_ft_rsp_iu *)((uint8_t *) fchdr + + sizeof(struct fc_frame_header) + + sizeof(struct fc_ct_hdr)); + len -= sizeof(struct fc_frame_header) + sizeof(struct fc_ct_hdr); + + while (rem_len > 0) { + + fcid = ntoh24(gpn_ft_tgt->fcid); + wwpn = be64_to_cpu(gpn_ft_tgt->wwpn); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport: 0x%x: ctrl:0x%x", fcid, gpn_ft_tgt->ctrl); + + if (fcid == iport->fcid) { + if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST) + break; + gpn_ft_tgt++; + rem_len -= sizeof(struct fc_gpn_ft_rsp_iu); + continue; + } + + tport = fnic_find_tport_by_wwpn(iport, wwpn); + if (!tport) { + /* + * New port registered with the switch or first time query + */ + tport = fdls_create_tport(iport, fcid, wwpn); + if (!tport) + return; + } + /* + * check if this was an existing tport with same fcid + * but whose wwpn has changed now ,then remove it and + * create a new one + */ + if (tport->fcid != fcid) { + fdls_delete_tport(iport, tport); + tport = fdls_create_tport(iport, fcid, wwpn); + if (!tport) + return; + } + + /* + * If this GPN_FT rsp is after RSCN then mark the tports which + * matches with the new GPN_FT list, if some tport is not + * found in GPN_FT we went to delete that tport later. 
+ */ + if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) + tport->flags |= FNIC_FDLS_TPORT_IN_GPN_FT_LIST; + + if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST) + break; + + gpn_ft_tgt++; + rem_len -= sizeof(struct fc_gpn_ft_rsp_iu); + } + if (rem_len <= 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN_FT response: malformed/corrupt frame rxlen: %d remlen: %d", + len, rem_len); + } + + /* Remove those ports which were not listed in GPN_FT */ + if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) { + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + + if (!(tport->flags & FNIC_FDLS_TPORT_IN_GPN_FT_LIST)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remove port: 0x%x not found in GPN_FT list", + tport->fcid); + fdls_delete_tport(iport, tport); + } else { + tport->flags &= ~FNIC_FDLS_TPORT_IN_GPN_FT_LIST; + } + if ((old_link_down_cnt != iport->fnic->link_down_cnt) + || (iport->state != FNIC_IPORT_STATE_READY)) { + return; + } + } + } +} + +static void +fdls_process_gpn_ft_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, int len) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_gpn_ft *gpn_ft_rsp = (struct fc_std_gpn_ft *) fchdr; + uint16_t rsp; + uint8_t reason_code; + int count = 0; + struct fnic_tport_s *tport, *next; + u32 old_link_down_cnt = iport->fnic->link_down_cnt; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process GPN_FT response: iport state: %d len: %d", + iport->state, len); + + /* + * GPNFT response :- + * FDLS_STATE_GPN_FT : GPNFT sent after SCR state + * during fabric discovery(FNIC_IPORT_STATE_FABRIC_DISC) + * FDLS_STATE_RSCN_GPN_FT : GPNFT sent in response to RSCN + * FDLS_STATE_SEND_GPNFT : GPNFT sent after deleting a Target, + * e.g. after receiving Target LOGO + * FDLS_STATE_TGT_DISCOVERY : Target discovery is currently in progress + * from a previous GPNFT response; a new GPNFT response has come. + */ + if (!(((iport->state == FNIC_IPORT_STATE_FABRIC_DISC) + && (fdls_get_state(fdls) == FDLS_STATE_GPN_FT)) + || ((iport->state == FNIC_IPORT_STATE_READY) + && ((fdls_get_state(fdls) == FDLS_STATE_RSCN_GPN_FT) + || (fdls_get_state(fdls) == FDLS_STATE_SEND_GPNFT) + || (fdls_get_state(fdls) == FDLS_STATE_TGT_DISCOVERY))))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPNFT resp recvd in fab state(%d) iport_state(%d). Dropping.", + fdls_get_state(fdls), iport->state); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + iport->state = FNIC_IPORT_STATE_READY; + rsp = FNIC_STD_GET_FC_CT_CMD((&gpn_ft_rsp->fc_std_ct_hdr)); + + switch (rsp) { + + case FC_FS_ACC: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP accept", iport->fcid); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_process_gpn_ft_tgt_list(iport, fchdr, len); + + /* + * iport state can change only if link down event happened + * We don't need to undo fdls_process_gpn_ft_tgt_list, + * that will be taken care in next link up event + */ + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Halting target discovery: fab st: %d iport st: %d ", + fdls_get_state(fdls), iport->state); + break; + } + fdls_tgt_discovery_start(iport); + break; + + case FC_FS_RJT: + reason_code = gpn_ft_rsp->fc_std_ct_hdr.ct_reason; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP Reject reason: %d", iport->fcid, reason_code); + + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP ret REJ/BSY. Retry from timer routine", + iport->fcid); + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: GPNFT_RSP reject", iport->fcid); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + /* + * If GPN_FT ls_rjt then we should delete + * all existing tports + */ + count = 0; + list_for_each_entry_safe(tport, next, &iport->tport_list, + links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN_FT_REJECT: Remove port: 0x%x", + tport->fcid); + fdls_delete_tport(iport, tport); + if ((old_link_down_cnt != iport->fnic->link_down_cnt) + || (iport->state != FNIC_IPORT_STATE_READY)) { + return; + } + count++; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN_FT_REJECT: Removed (0x%x) ports", count); + } + break; + + default: + break; + } +} + +/** + * fdls_process_fabric_logo_rsp - Handle an flogo response from the fcf + * @iport: Handle to fnic iport + * @fchdr: Incoming frame + */ +static void +fdls_process_fabric_logo_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *flogo_rsp = (struct fc_std_flogi *) fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (flogo_rsp->els.fl_cmd) { + case ELS_LS_ACC: + if (iport->fabric.state != FDLS_STATE_FABRIC_LOGO) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response. Fabric not in LOGO state. Dropping! %p", + iport); + return; + } + + iport->fabric.state = FDLS_STATE_FLOGO_DONE; + iport->state = FNIC_IPORT_STATE_LINK_WAIT; + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport 0x%p Canceling fabric disc timer\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response from Fabric for did: 0x%x", + ntoh24(fchdr->fh_d_id)); + return; + + case ELS_LS_RJT: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response from Fabric for did: 0x%x returned ELS_LS_RJT", + ntoh24(fchdr->fh_d_id)); + return; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGO response not accepted or rejected: 0x%x", + flogo_rsp->els.fl_cmd); + } +} + +static void +fdls_process_flogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, void *rx_frame) +{ + struct fnic_fdls_fabric_s *fabric = &iport->fabric; + struct fc_std_flogi *flogi_rsp = (struct fc_std_flogi *) fchdr; + uint8_t *fcid; + uint16_t rdf_size; + uint8_t fcmac[6] = { 0x0E, 0XFC, 0x00, 0x00, 0x00, 0x00 }; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS processing FLOGI response", iport->fcid); + + if (fdls_get_state(fabric) != FDLS_STATE_FABRIC_FLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received in state (%d). Dropping frame", + fdls_get_state(fabric)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fabric), oxid, iport->active_oxid_fabric_req); + return; + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (flogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_flogi_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fcid = FNIC_STD_GET_D_ID(fchdr); + iport->fcid = ntoh24(fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FLOGI response accepted", iport->fcid); + + /* Learn the Service Params */ + rdf_size = be16_to_cpu(FNIC_LOGI_RDF_SIZE(flogi_rsp->els)); + if ((rdf_size >= FNIC_MIN_DATA_FIELD_SIZE) + && (rdf_size < FNIC_FC_MAX_PAYLOAD_LEN)) + iport->max_payload_size = min(rdf_size, + iport->max_payload_size); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "max_payload_size from fabric: %u set: %d", rdf_size, + iport->max_payload_size); + + iport->r_a_tov = be32_to_cpu(FNIC_LOGI_R_A_TOV(flogi_rsp->els)); + iport->e_d_tov = be32_to_cpu(FNIC_LOGI_E_D_TOV(flogi_rsp->els)); + + if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC) + iport->e_d_tov = iport->e_d_tov / FNIC_NSEC_TO_MSEC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "From fabric: R_A_TOV: %d E_D_TOV: %d", + iport->r_a_tov, iport->e_d_tov); + + fc_host_fabric_name(iport->fnic->host) = + get_unaligned_be64(&FNIC_LOGI_NODE_NAME(flogi_rsp->els)); + fc_host_port_id(iport->fnic->host) = iport->fcid; + + fnic_fdls_learn_fcoe_macs(iport, rx_frame, fcid); + + if (fnic_fdls_register_portid(iport, iport->fcid, rx_frame) != 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FLOGI registration failed", iport->fcid); + break; + } + + memcpy(&fcmac[3], fcid, 3); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Adding vNIC device MAC addr: %02x:%02x:%02x:%02x:%02x:%02x", + fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], + fcmac[5]); + vnic_dev_add_addr(iport->fnic->vdev, fcmac); + + if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI) { + fnic_fdls_start_plogi(iport); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received. Starting PLOGI"); + } else { + /* From FDLS_STATE_FABRIC_FLOGI state fabric can only go to + * FDLS_STATE_LINKDOWN + * state, hence we don't have to worry about undoing: + * the fnic_fdls_register_portid and vnic_dev_add_addr + */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received in state (%d). Dropping frame", + fdls_get_state(fabric)); + } + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_flogi_ls_rejects); + if (fabric->retry_counter < iport->max_flogi_retries) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI returned ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + + /* Retry Flogi again from the timer routine. */ + fabric->flags |= FNIC_FDLS_RETRY_FRAME; + + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI returned ELS_LS_RJT. 
Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport 0x%p Canceling fabric disc timer\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + fabric->timer_pending = 0; + fabric->retry_counter = 0; + } + break; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response not accepted: 0x%x", + flogi_rsp->els.fl_cmd); + atomic64_inc(&iport->iport_stats.fabric_flogi_misc_rejects); + break; + } +} + +static void +fdls_process_fabric_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *) fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state((&iport->fabric)) != FDLS_STATE_FABRIC_PLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Fabric PLOGI response received in state (%d). Dropping frame", + fdls_get_state(&iport->fabric)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_plogi_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x fabric PLOGI response: Accepted\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_set_state(&iport->fabric, FDLS_STATE_RPN_ID); + fdls_send_rpn_id(iport); + break; + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_plogi_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (iport->fabric.retry_counter < iport->max_plogi_retries)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Fabric PLOGI ELS_LS_RJT BUSY. Retry from timer routine", + iport->fcid); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Fabric PLOGI ELS_LS_RJT. Halting discovery", + iport->fcid); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + return; + } + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI response not accepted: 0x%x", + plogi_rsp->els.fl_cmd); + atomic64_inc(&iport->iport_stats.fabric_plogi_misc_rejects); + break; + } +} + +static void fdls_process_fdmi_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + u64 fdmi_tov; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (iport->active_oxid_fdmi_plogi != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fdmi_plogi); + return; + } + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); + + if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) { + del_timer_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending = 0; + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process fdmi PLOGI response status: ELS_LS_ACC\n"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Sending fdmi registration for port 0x%x\n", + iport->fcid); + + fdls_fdmi_register_hba(iport); + fdls_fdmi_register_pa(iport); + fdmi_tov = jiffies + msecs_to_jiffies(5000); + mod_timer(&iport->fabric.fdmi_timer, + round_jiffies(fdmi_tov)); + break; + case ELS_LS_RJT: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Fabric FDMI PLOGI returned ELS_LS_RJT reason: 0x%x", + els_rjt->rej.er_reason); + + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (iport->fabric.fdmi_retry < 7)) { + iport->fabric.fdmi_retry++; + fdls_send_fdmi_plogi(iport); + } + break; + default: + break; + } + } +} +static void fdls_process_fdmi_reg_ack(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, + int rsp_type) +{ + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + if (!iport->fabric.fdmi_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received FDMI ack while not waiting: 0x%x\n", + FNIC_STD_GET_OX_ID(fchdr)); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + + if ((iport->active_oxid_fdmi_rhba != oxid) && + (iport->active_oxid_fdmi_rpa != oxid)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. oxid recvd: 0x%x, active oxids(rhba,rpa): 0x%x, 0x%x\n", + oxid, iport->active_oxid_fdmi_rhba, iport->active_oxid_fdmi_rpa); + return; + } + if (FNIC_FRAME_TYPE(oxid) == FNIC_FRAME_TYPE_FDMI_RHBA) { + iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba); + } else { + iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa); + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Received FDMI registration ack\n", + iport->fcid); + + if (!iport->fabric.fdmi_pending) { + del_timer_sync(&iport->fabric.fdmi_timer); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Canceling FDMI timer\n", + iport->fcid); + } +} + +static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + s_id = ntoh24(FNIC_STD_GET_S_ID(fchdr)); + + if (!(s_id != FC_FID_MGMT_SERV)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid SID: 0x%x. 
Dropping frame", + s_id); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + + switch (FNIC_FRAME_TYPE(oxid)) { + case FNIC_FRAME_TYPE_FDMI_PLOGI: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received FDMI PLOGI ABTS rsp with oxid: 0x%x", oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING; + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + break; + case FNIC_FRAME_TYPE_FDMI_RHBA: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received FDMI RHBA ABTS rsp with oxid: 0x%x", oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING; + + /* If RPA is still pending, don't turn off ABORT PENDING. + * We count on the timer to detect the ABTS timeout and take + * corrective action. + */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING)) + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + break; + case FNIC_FRAME_TYPE_FDMI_RPA: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received FDMI RPA ABTS rsp with oxid: 0x%x", oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING; + + /* If RHBA is still pending, don't turn off ABORT PENDING. + * We count on the timer to detect the ABTS timeout and take + * corrective action. + */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)) + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: iport->fabric.fdmi_pending: 0x%x", + iport->fcid, iport->fabric.fdmi_pending); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid oxid: 0x%x. Dropping frame", + oxid); + break; + } + + /* + * Only if ABORT PENDING is off, delete the timer, and if no other + * operations are pending, retry FDMI. + * Otherwise, let the timer pop and take the appropriate action. + */ + if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) { + timer_delete_sync(&iport->fabric.fdmi_timer); + if (!iport->fabric.fdmi_pending) + fdls_fdmi_retry_plogi(iport); + } +} + +static void +fdls_process_fabric_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fc_std_abts_ba_acc *ba_acc = (struct fc_std_abts_ba_acc *)fchdr; + struct fc_std_abts_ba_rjt *ba_rjt; + uint32_t fabric_state = iport->fabric.state; + struct fnic *fnic = iport->fnic; + int frame_type; + uint16_t oxid; + + s_id = ntoh24(fchdr->fh_s_id); + ba_rjt = (struct fc_std_abts_ba_rjt *) fchdr; + + if (!((s_id == FC_FID_DIR_SERV) || (s_id == FC_FID_FLOGI) + || (s_id == FC_FID_FCTRL))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid SID: 0x%x. 
Dropping frame", + s_id); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid oxid: 0x%x. Dropping frame", + oxid); + return; + } + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp BA_ACC for fabric_state: %d OX_ID: 0x%x", + fabric_state, be16_to_cpu(ba_acc->acc.ba_ox_id)); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "BA_RJT fs: %d OX_ID: 0x%x rc: 0x%x rce: 0x%x", + fabric_state, FNIC_STD_GET_OX_ID(&ba_rjt->fchdr), + ba_rjt->rjt.br_reason, ba_rjt->rjt.br_explan); + } + + frame_type = FNIC_FRAME_TYPE(oxid); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + /* currently error handling/retry logic is same for ABTS BA_ACC & BA_RJT */ + switch (frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + if (iport->fabric.retry_counter < iport->max_flogi_retries) + fdls_send_fabric_flogi(iport); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max FLOGI retries"); + break; + case FNIC_FRAME_TYPE_FABRIC_LOGO: + if (iport->fabric.retry_counter < FABRIC_LOGO_MAX_RETRY) + fdls_send_fabric_logo(iport); + break; + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + if (iport->fabric.retry_counter < iport->max_plogi_retries) + fdls_send_fabric_plogi(iport); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max PLOGI retries"); + break; + case FNIC_FRAME_TYPE_FABRIC_RPN: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_rpn_id(iport); + else + /* go back to fabric Plogi */ + fnic_fdls_start_plogi(iport); + break; + case FNIC_FRAME_TYPE_FABRIC_SCR: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_scr(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR exhausted retries. Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_RFT: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_register_fc4_types(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFT exhausted retries. Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_RFF: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_register_fc4_features(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF exhausted retries. Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + if (iport->fabric.retry_counter <= FDLS_RETRY_COUNT) + fdls_send_gpn_ft(iport, fabric_state); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN FT exhausted retries. 
Start fabric PLOGI %p", + iport); + break; + default: + /* + * We should not be here since we already validated rx oxid with + * our active_oxid_fabric_req + */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Invalid OXID/active oxid 0x%x\n", oxid); + WARN_ON(true); + return; + } +} + +static void +fdls_process_abts_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_abts_ba_acc *pba_acc; + uint32_t nport_id; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_abts_ba_acc); + + nport_id = ntoh24(fchdr->fh_s_id); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abort from SID 0x%8x", nport_id); + + tport = fnic_find_tport_by_fcid(iport, nport_id); + if (tport) { + if (tport->active_oxid == oxid) { + tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED; + fdls_free_oxid(iport, oxid, &tport->active_oxid); + } + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate frame to send response for ABTS req", + iport->fcid); + return; + } + + pba_acc = (struct fc_std_abts_ba_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pba_acc = (struct fc_std_abts_ba_acc) { + .fchdr = {.fh_r_ctl = FC_RCTL_BA_ACC, + .fh_f_ctl = {FNIC_FCP_RSP_FCTL, 0, 0}}, + .acc = {.ba_low_seq_cnt = 0, .ba_high_seq_cnt = cpu_to_be16(0xFFFF)} + }; + + FNIC_STD_SET_S_ID(pba_acc->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pba_acc->fchdr, fchdr->fh_s_id); + FNIC_STD_SET_OX_ID(pba_acc->fchdr, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_STD_SET_RX_ID(pba_acc->fchdr, FNIC_STD_GET_RX_ID(fchdr)); + + pba_acc->acc.ba_rx_id = cpu_to_be16(FNIC_STD_GET_RX_ID(fchdr)); + pba_acc->acc.ba_ox_id = cpu_to_be16(FNIC_STD_GET_OX_ID(fchdr)); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send BA ACC with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_unsupported_els_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_els_rjt_rsp *pls_rsp; + uint16_t oxid; + uint32_t d_id = ntoh24(fchdr->fh_d_id); + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + + if (iport->fcid != d_id) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping unsupported ELS with illegal frame bits 0x%x\n", + d_id); + atomic64_inc(&iport->iport_stats.unsupported_frames_dropped); + return; + } + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping unsupported ELS request in iport state: %d", + iport->state); + atomic64_inc(&iport->iport_stats.unsupported_frames_dropped); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send response to unsupported ELS request"); + return; + } + + pls_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(frame, iport); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Process unsupported ELS request from SID: 0x%x", + iport->fcid, ntoh24(fchdr->fh_s_id)); + + /* We don't support this ELS request, send a reject */ + pls_rsp->rej.er_reason = 0x0B; + pls_rsp->rej.er_explan = 
0x0; + pls_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(pls_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pls_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pls_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_rls_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_rls_acc *prls_acc_rsp; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rls_acc); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process RLS request %d", iport->fnic->fnic_num); + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RLS req in iport state: %d. Dropping the frame.", + iport->state); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RLS accept"); + return; + } + prls_acc_rsp = (struct fc_std_rls_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + FNIC_STD_SET_S_ID(prls_acc_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(prls_acc_rsp->fchdr, fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(prls_acc_rsp->fchdr, oxid); + FNIC_STD_SET_RX_ID(prls_acc_rsp->fchdr, FNIC_UNASSIGNED_RXID); + + FNIC_STD_SET_F_CTL(prls_acc_rsp->fchdr, FNIC_ELS_REP_FCTL << 16); + FNIC_STD_SET_R_CTL(prls_acc_rsp->fchdr, FC_RCTL_ELS_REP); + FNIC_STD_SET_TYPE(prls_acc_rsp->fchdr, FC_TYPE_ELS); + + prls_acc_rsp->els.rls_cmd = ELS_LS_ACC; + prls_acc_rsp->els.rls_lesb.lesb_link_fail = + cpu_to_be32(iport->fnic->link_down_cnt); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_els_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr, + uint32_t len) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *pels_acc; + uint16_t oxid; + uint8_t *fc_payload; + uint8_t type; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header); + type = *fc_payload; + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping ELS frame type: 0x%x in iport state: %d", + type, iport->state); + return; + } + switch (type) { + case ELS_ECHO: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending LS_ACC for ECHO request %d\n", + iport->fnic->fnic_num); + break; + + case ELS_RRQ: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending LS_ACC for RRQ request %d\n", + iport->fnic->fnic_num); + break; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending LS_ACC for 0x%x ELS frame\n", type); + break; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send ELS response for 0x%x", + type); + return; + } + + if (type == ELS_ECHO) { + /* Brocade sends a longer payload, copy all frame back */ + memcpy(frame, fchdr, len); + } + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(pels_acc->fchdr, fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid); + + if (type == ELS_ECHO) + frame_size += len; 
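+ /*
+ * For ELS_ECHO the full request frame was copied into the reply
+ * above, so the response length tracks the received frame length;
+ * the other ELS types get the fixed-size LS_ACC payload below.
+ */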
+ else + frame_size += sizeof(struct fc_std_els_acc_rsp); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_tgt_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fnic_tport_s *tport; + uint32_t tport_state; + struct fc_std_abts_ba_acc *ba_acc; + struct fc_std_abts_ba_rjt *ba_rjt; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + int frame_type; + + s_id = ntoh24(fchdr->fh_s_id); + ba_acc = (struct fc_std_abts_ba_acc *)fchdr; + ba_rjt = (struct fc_std_abts_ba_rjt *)fchdr; + + tport = fnic_find_tport_by_fcid(iport, s_id); + if (!tport) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt abts rsp with invalid SID: 0x%x", s_id); + return; + } + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport 0x%p Canceling fabric disc timer\n", tport); + fnic_del_tport_timer_sync(fnic, tport); + } + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt abts rsp in iport state(%d). Dropping.", + iport->state); + return; + } + tport->timer_pending = 0; + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + tport_state = tport->state; + oxid = FNIC_STD_GET_OX_ID(fchdr); + + /*This abort rsp is for ADISC */ + frame_type = FNIC_FRAME_TYPE(oxid); + switch (frame_type) { + case FNIC_FRAME_TYPE_TGT_ADISC: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "OX_ID: 0x%x tgt_fcid: 0x%x rcvd tgt adisc abts resp BA_ACC", + be16_to_cpu(ba_acc->acc.ba_ox_id), + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC BA_RJT rcvd tport_fcid: 0x%x tport_state: %d ", + tport->fcid, tport_state); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation:0x%x ", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < FDLS_RETRY_COUNT) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_adisc(iport, tport); + return; + } + fdls_free_oxid(iport, oxid, &tport->active_oxid); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC not responding. 
Deleting target port: 0x%x", + tport->fcid); + fdls_delete_tport(iport, tport); + /* Restart discovery of targets */ + if ((iport->state == FNIC_IPORT_STATE_READY) + && (iport->fabric.state != FDLS_STATE_SEND_GPNFT) + && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) { + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + break; + case FNIC_FRAME_TYPE_TGT_PLOGI: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt PLOGI abts response BA_ACC tgt_fcid: 0x%x", + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x", + tport->fcid, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation: 0x%x", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < iport->max_plogi_retries) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); + return; + } + + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_delete_tport(iport, tport); + /* Restart discovery of targets */ + if ((iport->state == FNIC_IPORT_STATE_READY) + && (iport->fabric.state != FDLS_STATE_SEND_GPNFT) + && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) { + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + break; + case FNIC_FRAME_TYPE_TGT_PRLI: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Received tgt PRLI abts response BA_ACC", + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x ", + tport->fcid, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation: 0x%x", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < FDLS_RETRY_COUNT) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_prli(iport, tport); + return; + } + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); /* go back to plogi */ + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS response for unknown frame %p", iport); + break; + } + +} + +static void +fdls_process_plogi_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_els_rjt_rsp *pplogi_rsp; + uint16_t oxid; + uint32_t d_id = ntoh24(fchdr->fh_d_id); + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + + if (iport->fcid != d_id) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received PLOGI with illegal frame bits. 
Dropping frame from 0x%x", + d_id); + return; + } + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received PLOGI request in iport state: %d Dropping frame", + iport->state); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send response to PLOGI request"); + return; + } + + pplogi_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(frame, iport); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Process PLOGI request from SID: 0x%x", + iport->fcid, ntoh24(fchdr->fh_s_id)); + + /* We don't support PLOGI request, send a reject */ + pplogi_rsp->rej.er_reason = 0x0B; + pplogi_rsp->rej.er_explan = 0x0; + pplogi_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(pplogi_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pplogi_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pplogi_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_logo_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + struct fc_std_logo *logo = (struct fc_std_logo *)fchdr; + uint32_t nport_id; + uint64_t nport_name; + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + nport_id = ntoh24(logo->els.fl_n_port_id); + nport_name = be64_to_cpu(logo->els.fl_n_port_wwn); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process LOGO request from fcid: 0x%x", nport_id); + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dropping LOGO req from 0x%x in iport state: %d", + nport_id, iport->state); + return; + } + + tport = fnic_find_tport_by_fcid(iport, nport_id); + + if (!tport) { + /* We are not logged in with the nport, log and drop... 
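+ * No tport entry exists for this N_Port ID, so there is no login
+ * state to tear down and no LOGO response is sent.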
*/ + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received LOGO from an nport not logged in: 0x%x(0x%llx)", + nport_id, nport_name); + return; + } + if (tport->fcid != nport_id) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received LOGO with invalid target port fcid: 0x%x(0x%llx)", + nport_id, nport_name); + return; + } + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + /* got a logo in response to adisc to a target which has logged out */ + if (tport->state == FDLS_TGT_STATE_ADISC) { + tport->retry_counter = 0; + oxid = tport->active_oxid; + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_delete_tport(iport, tport); + fdls_send_logo_resp(iport, &logo->fchdr); + if ((iport->state == FNIC_IPORT_STATE_READY) + && (fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) + && (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Sending GPNFT in response to LOGO from Target:0x%x", + nport_id); + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + return; + } + } else { + fdls_delete_tport(iport, tport); + } + if (iport->state == FNIC_IPORT_STATE_READY) { + fdls_send_logo_resp(iport, &logo->fchdr); + if ((fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) && + (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Sending GPNFT in response to LOGO from Target:0x%x", + nport_id); + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + } +} + +static void +fdls_process_rscn(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + struct fc_std_rscn *rscn; + struct fc_els_rscn_page *rscn_port = NULL; + int num_ports; + struct fnic_tport_s *tport, *next; + uint32_t nport_id; + uint8_t fcid[3]; + int newports = 0; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + int rscn_type = NOT_PC_RSCN; + uint32_t sid = ntoh24(fchdr->fh_s_id); + unsigned long reset_fnic_list_lock_flags = 0; + uint16_t rscn_payload_len; + + atomic64_inc(&iport->iport_stats.num_rscns); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process RSCN %p", iport); + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS RSCN received in state(%d). Dropping", + fdls_get_state(fdls)); + return; + } + + rscn = (struct fc_std_rscn *)fchdr; + rscn_payload_len = be16_to_cpu(rscn->els.rscn_plen); + + /* frame validation */ + if ((rscn_payload_len % 4 != 0) || (rscn_payload_len < 8) + || (rscn_payload_len > 1024) + || (rscn->els.rscn_page_len != 4)) { + num_ports = 0; + if ((rscn_payload_len == 0xFFFF) + && (sid == FC_FID_FCTRL)) { + rscn_type = PC_RSCN; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "pcrscn: PCRSCN received. sid: 0x%x payload len: 0x%x", + sid, rscn_payload_len); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN payload_len: 0x%x page_len: 0x%x", + rscn_payload_len, rscn->els.rscn_page_len); + /* if this happens then we need to send ADISC to all the tports. 
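+ * A malformed RSCN payload is handled conservatively: every tport
+ * in READY state is flagged for ADISC re-validation rather than
+ * trusting the corrupt page list.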
*/
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ } /* end else */
+ } else {
+ num_ports = (rscn_payload_len - 4) / rscn->els.rscn_page_len;
+ rscn_port = (struct fc_els_rscn_page *)(rscn + 1);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN received for num_ports: %d payload_len: %d page_len: %d ",
+ num_ports, rscn_payload_len, rscn->els.rscn_page_len);
+
+ /*
+ * An RSCN has at least one Port_ID page, but the page may not carry
+ * any port_id. If no port_id is specified in the Port_ID page, we
+ * send ADISC to all the tports.
+ */
+
+ while (num_ports) {
+
+ memcpy(fcid, rscn_port->rscn_fid, 3);
+
+ nport_id = ntoh24(fcid);
+ rscn_port++;
+ num_ports--;
+ /* A zero port_id means we need to send ADISC to all the tports. */
+ if (nport_id == 0) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ break;
+ }
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN port id list: 0x%x", nport_id);
+
+ if (!tport) {
+ newports++;
+ continue;
+ }
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ }
+
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON &&
+ rscn_type == PC_RSCN && fnic->role == FNIC_ROLE_FCP_INITIATOR) {
+
+ if (fnic->pc_rscn_handling_status == PC_RSCN_HANDLING_IN_PROGRESS) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PCRSCN handling already in progress. Skip host reset: %d",
+ iport->fnic->fnic_num);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Processing PCRSCN. 
Queuing fnic for host reset: %d", + iport->fnic->fnic_num); + fnic->pc_rscn_handling_status = PC_RSCN_HANDLING_IN_PROGRESS; + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + + spin_lock_irqsave(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + list_add_tail(&fnic->links, &reset_fnic_list); + spin_unlock_irqrestore(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + + queue_work(reset_fnic_work_queue, &reset_fnic_work); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process RSCN sending GPN_FT: newports: %d", newports); + fdls_send_gpn_ft(iport, FDLS_STATE_RSCN_GPN_FT); + fdls_send_rscn_resp(iport, fchdr); + } +} + +void fnic_fdls_disc_start(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + + fc_host_fabric_name(iport->fnic->host) = 0; + fc_host_post_event(iport->fnic->host, fc_get_event_number(), + FCH_EVT_LIPRESET, 0); + + if (!iport->usefip) { + if (iport->flags & FNIC_FIRST_LINK_UP) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_scsi_fcpio_reset(iport->fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + iport->flags &= ~FNIC_FIRST_LINK_UP; + } + fnic_fdls_start_flogi(iport); + } else + fnic_fdls_start_plogi(iport); +} + +static void +fdls_process_adisc_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_els_adisc *padisc_acc; + struct fc_std_els_adisc *adisc_req = (struct fc_std_els_adisc *)fchdr; + uint64_t frame_wwnn; + uint64_t frame_wwpn; + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint8_t *rjt_frame; + uint8_t *acc_frame; + struct fc_std_els_rjt_rsp *prjts_rsp; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t rjt_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + uint16_t acc_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_adisc); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process ADISC request %d", iport->fnic->fnic_num); + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport for fcid: 0x%x not found. Dropping ADISC req.", + tgt_fcid); + return; + } + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dropping ADISC req from fcid: 0x%x in iport state: %d", + tgt_fcid, iport->state); + return; + } + + frame_wwnn = be64_to_cpu(adisc_req->els.adisc_wwnn); + frame_wwpn = be64_to_cpu(adisc_req->els.adisc_wwpn); + + if ((frame_wwnn != tport->wwnn) || (frame_wwpn != tport->wwpn)) { + /* send reject */ + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC req from fcid: 0x%x mismatch wwpn: 0x%llx wwnn: 0x%llx", + tgt_fcid, frame_wwpn, frame_wwnn); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "local tport wwpn: 0x%llx wwnn: 0x%llx. 
Sending RJT", + tport->wwpn, tport->wwnn); + + rjt_frame = fdls_alloc_frame(iport); + if (rjt_frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate rjt_frame to send response to ADISC request"); + return; + } + + prjts_rsp = (struct fc_std_els_rjt_rsp *) (rjt_frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(rjt_frame, iport); + + prjts_rsp->rej.er_reason = 0x03; /* logical error */ + prjts_rsp->rej.er_explan = 0x1E; /* N_port login required */ + prjts_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(prjts_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(prjts_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(prjts_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, rjt_frame, rjt_frame_size); + return; + } + + acc_frame = fdls_alloc_frame(iport); + if (acc_frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send ADISC accept"); + return; + } + + padisc_acc = (struct fc_std_els_adisc *) (acc_frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + FNIC_STD_SET_S_ID(padisc_acc->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(padisc_acc->fchdr, fchdr->fh_s_id); + + FNIC_STD_SET_F_CTL(padisc_acc->fchdr, FNIC_ELS_REP_FCTL << 16); + FNIC_STD_SET_R_CTL(padisc_acc->fchdr, FC_RCTL_ELS_REP); + FNIC_STD_SET_TYPE(padisc_acc->fchdr, FC_TYPE_ELS); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(padisc_acc->fchdr, oxid); + FNIC_STD_SET_RX_ID(padisc_acc->fchdr, FNIC_UNASSIGNED_RXID); + + padisc_acc->els.adisc_cmd = ELS_LS_ACC; + + FNIC_STD_SET_NPORT_NAME(&padisc_acc->els.adisc_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&padisc_acc->els.adisc_wwnn, + iport->wwnn); + memcpy(padisc_acc->els.adisc_port_id, fchdr->fh_d_id, 3); + + fnic_send_fcoe_frame(iport, acc_frame, acc_frame_size); +} + +/* + * Performs a validation for all FCOE frames and return the frame type + */ +int +fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t type; + uint8_t *fc_payload; + uint16_t oxid; + uint32_t s_id; + uint32_t d_id; + struct fnic *fnic = iport->fnic; + struct fnic_fdls_fabric_s *fabric = &iport->fabric; + int oxid_frame_type; + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header); + type = *fc_payload; + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + + /* some common validation */ + if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) { + if (iport->fcid != d_id || (!FNIC_FC_FRAME_CS_CTL(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "invalid frame received. Dropping frame"); + return -1; + } + } + + /* BLS ABTS response */ + if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC) + || (fchdr->fh_r_ctl == FC_RCTL_BA_RJT)) { + if (!(FNIC_FC_FRAME_TYPE_BLS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS invalid frame. Dropping frame"); + return -1; + + } + if (fdls_is_oxid_fabric_req(oxid)) { + if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame", + oxid, s_id); + return -1; + } + return FNIC_FABRIC_BLS_ABTS_RSP; + } else if (fdls_is_oxid_fdmi_req(oxid)) { + return FNIC_FDMI_BLS_ABTS_RSP; + } else if (fdls_is_oxid_tgt_req(oxid)) { + return FNIC_TPORT_BLS_ABTS_RSP; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS rsp with unknown oxid(0x%x) from 0x%x. 
Dropping frame", + oxid, s_id); + return -1; + } + + /* BLS ABTS Req */ + if ((fchdr->fh_r_ctl == FC_RCTL_BA_ABTS) + && (FNIC_FC_FRAME_TYPE_BLS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Receiving Abort Request from s_id: 0x%x", s_id); + return FNIC_BLS_ABTS_REQ; + } + + /* unsolicited requests frames */ + if (FNIC_FC_FRAME_UNSOLICITED(fchdr)) { + switch (type) { + case ELS_LOGO: + if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) + || (!FNIC_FC_FRAME_UNSOLICITED(fchdr)) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received LOGO invalid frame. Dropping frame"); + return -1; + } + return FNIC_ELS_LOGO_REQ; + case ELS_RSCN: + if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) + || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RSCN invalid FCTL. Dropping frame"); + return -1; + } + if (s_id != FC_FID_FCTRL) + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RSCN from target FCTL: 0x%x type: 0x%x s_id: 0x%x.", + fchdr->fh_f_ctl[0], fchdr->fh_type, s_id); + return FNIC_ELS_RSCN_REQ; + case ELS_PLOGI: + return FNIC_ELS_PLOGI_REQ; + case ELS_ECHO: + return FNIC_ELS_ECHO_REQ; + case ELS_ADISC: + return FNIC_ELS_ADISC; + case ELS_RLS: + return FNIC_ELS_RLS; + case ELS_RRQ: + return FNIC_ELS_RRQ; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unsupported frame (type:0x%02x) from fcid: 0x%x", + type, s_id); + return FNIC_ELS_UNSUPPORTED_REQ; + } + } + + /* solicited response from fabric or target */ + oxid_frame_type = FNIC_FRAME_TYPE(oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "oxid frame code: 0x%x, oxid: 0x%x\n", oxid_frame_type, oxid); + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_FLOGI) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_FLOGI_RSP; + + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_DIR_SERV) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_PLOGI_RSP; + + case FNIC_FRAME_TYPE_FABRIC_SCR: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_FCTRL) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_SCR_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RPN: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_RPN_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RFT: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_RFT_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RFF: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. 
Dropping frame"); + return -1; + } + return FNIC_FABRIC_RFF_RSP; + + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_GPN_FT_RSP; + + case FNIC_FRAME_TYPE_FABRIC_LOGO: + return FNIC_FABRIC_LOGO_RSP; + case FNIC_FRAME_TYPE_FDMI_PLOGI: + return FNIC_FDMI_PLOGI_RSP; + case FNIC_FRAME_TYPE_FDMI_RHBA: + return FNIC_FDMI_REG_HBA_RSP; + case FNIC_FRAME_TYPE_FDMI_RPA: + return FNIC_FDMI_RPA_RSP; + case FNIC_FRAME_TYPE_TGT_PLOGI: + return FNIC_TPORT_PLOGI_RSP; + case FNIC_FRAME_TYPE_TGT_PRLI: + return FNIC_TPORT_PRLI_RSP; + case FNIC_FRAME_TYPE_TGT_ADISC: + return FNIC_TPORT_ADISC_RSP; + case FNIC_FRAME_TYPE_TGT_LOGO: + if (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping Unknown frame in tport solicited exchange range type: 0x%x.", + fchdr->fh_type); + return -1; + } + return FNIC_TPORT_LOGO_RSP; + default: + /* Drop the Rx frame and log/stats it */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Solicited response: unknown OXID: 0x%x", oxid); + return -1; + } + + return -1; +} + +void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame, + int len, int fchdr_offset) +{ + struct fc_frame_header *fchdr; + uint32_t s_id = 0; + uint32_t d_id = 0; + struct fnic *fnic = iport->fnic; + int frame_type; + + fchdr = (struct fc_frame_header *) ((uint8_t *) rx_frame + fchdr_offset); + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + + fnic_debug_dump_fc_frame(fnic, fchdr, len, "Incoming"); + + frame_type = + fnic_fdls_validate_and_get_frame_type(iport, fchdr); + + /*if we are in flogo drop everything else */ + if (iport->fabric.state == FDLS_STATE_FABRIC_LOGO && + frame_type != FNIC_FABRIC_LOGO_RSP) + return; + + switch (frame_type) { + case FNIC_FABRIC_FLOGI_RSP: + fdls_process_flogi_rsp(iport, fchdr, rx_frame); + break; + case FNIC_FABRIC_PLOGI_RSP: + fdls_process_fabric_plogi_rsp(iport, fchdr); + break; + case FNIC_FDMI_PLOGI_RSP: + fdls_process_fdmi_plogi_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RPN_RSP: + fdls_process_rpn_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RFT_RSP: + fdls_process_rft_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RFF_RSP: + fdls_process_rff_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_SCR_RSP: + fdls_process_scr_rsp(iport, fchdr); + break; + case FNIC_FABRIC_GPN_FT_RSP: + fdls_process_gpn_ft_rsp(iport, fchdr, len); + break; + case FNIC_TPORT_PLOGI_RSP: + fdls_process_tgt_plogi_rsp(iport, fchdr); + break; + case FNIC_TPORT_PRLI_RSP: + fdls_process_tgt_prli_rsp(iport, fchdr); + break; + case FNIC_TPORT_ADISC_RSP: + fdls_process_tgt_adisc_rsp(iport, fchdr); + break; + case FNIC_TPORT_BLS_ABTS_RSP: + fdls_process_tgt_abts_rsp(iport, fchdr); + break; + case FNIC_TPORT_LOGO_RSP: + /* Logo response from tgt which we have deleted */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Logo response from tgt: 0x%x", + ntoh24(fchdr->fh_s_id)); + break; + case FNIC_FABRIC_LOGO_RSP: + fdls_process_fabric_logo_rsp(iport, fchdr); + break; + case FNIC_FABRIC_BLS_ABTS_RSP: + fdls_process_fabric_abts_rsp(iport, fchdr); + break; + case FNIC_FDMI_BLS_ABTS_RSP: + fdls_process_fdmi_abts_rsp(iport, fchdr); + break; + case FNIC_BLS_ABTS_REQ: + fdls_process_abts_req(iport, fchdr); + break; + case FNIC_ELS_UNSUPPORTED_REQ: + fdls_process_unsupported_els_req(iport, fchdr); + break; + case 
FNIC_ELS_PLOGI_REQ: + fdls_process_plogi_req(iport, fchdr); + break; + case FNIC_ELS_RSCN_REQ: + fdls_process_rscn(iport, fchdr); + break; + case FNIC_ELS_LOGO_REQ: + fdls_process_logo_req(iport, fchdr); + break; + case FNIC_ELS_RRQ: + case FNIC_ELS_ECHO_REQ: + fdls_process_els_req(iport, fchdr, len); + break; + case FNIC_ELS_ADISC: + fdls_process_adisc_req(iport, fchdr); + break; + case FNIC_ELS_RLS: + fdls_process_rls_req(iport, fchdr); + break; + case FNIC_FDMI_REG_HBA_RSP: + case FNIC_FDMI_RPA_RSP: + fdls_process_fdmi_reg_ack(iport, fchdr, frame_type); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "s_id: 0x%x d_did: 0x%x", s_id, d_id); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown FCoE frame of len: %d. Dropping frame", len); + break; + } +} + +void fnic_fdls_disc_init(struct fnic_iport_s *iport) +{ + fdls_reset_oxid_pool(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_INIT); +} + +void fnic_fdls_link_down(struct fnic_iport_s *iport) +{ + struct fnic_tport_s *tport, *next; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS processing link down", iport->fcid); + + fdls_set_state((&iport->fabric), FDLS_STATE_LINKDOWN); + iport->fabric.flags = 0; + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_scsi_fcpio_reset(iport->fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "removing rport: 0x%x", tport->fcid); + fdls_delete_tport(iport, tport); + } + + if (fnic_fdmi_support == 1) { + if (iport->fabric.fdmi_pending > 0) { + timer_delete_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending = 0; + } + iport->flags &= ~FNIC_FDMI_ACTIVE; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS finish processing link down", iport->fcid); +} diff --git a/drivers/scsi/fnic/fdls_fc.h b/drivers/scsi/fnic/fdls_fc.h new file mode 100644 index 0000000000000..012f43afd083c --- /dev/null +++ b/drivers/scsi/fnic/fdls_fc.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _FDLS_FC_H_ +#define _FDLS_FC_H_ + +/* This file contains the declarations for FC fabric services + * and target discovery + * + * Request and Response for + * 1. FLOGI + * 2. PLOGI to Fabric Controller + * 3. GPN_ID, GPN_FT + * 4. RSCN + * 5. PLOGI to Target + * 6. 
PRLI to Target + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FDLS_MIN_FRAMES (32) +#define FDLS_MIN_FRAME_ELEM (4) +#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002 +#define FNIC_FCP_SP_TARGET 0x00000010 +#define FNIC_FCP_SP_INITIATOR 0x00000020 +#define FNIC_FCP_SP_CONF_CMPL 0x00000080 +#define FNIC_FCP_SP_RETRY 0x00000100 + +#define FNIC_FC_CONCUR_SEQS (0xFF) +#define FNIC_FC_RO_INFO (0x1F) + +/* Little Endian */ +#define FNIC_UNASSIGNED_OXID (0xffff) +#define FNIC_UNASSIGNED_RXID (0xffff) +#define FNIC_ELS_REQ_FCTL (0x000029) +#define FNIC_ELS_REP_FCTL (0x000099) + +#define FNIC_FCP_RSP_FCTL (0x000099) +#define FNIC_REQ_ABTS_FCTL (0x000009) + +#define FNIC_FC_PH_VER_HI (0x20) +#define FNIC_FC_PH_VER_LO (0x20) +#define FNIC_FC_PH_VER (0x2020) +#define FNIC_FC_B2B_CREDIT (0x0A) +#define FNIC_FC_B2B_RDF_SZ (0x0800) + +#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data) +#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov) +#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov) +#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features)) +#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn) +#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn) + +#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \ + (FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size)) +#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \ + (FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov)) +#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \ + (FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov)) + +#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3) +#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3) +#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid)) +#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid)) + +#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl) +#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type) +#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \ + put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl)) + +#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr) +#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr) +#define FNIC_STD_SET_PORT_ID(__req, __portid) \ + memcpy(__req.fr_fid.fp_fid, __portid, 3) +#define FNIC_STD_SET_PORT_NAME(_req, _pName) \ + (put_unaligned_be64(_pName, &_req.fr_wwn)) + +#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id)) +#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id)) +#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id) +#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id) +#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type) +#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl) +#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl) + +#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd)) + +#define FNIC_FCOE_MAX_FRAME_SZ (2048) +#define FNIC_FCOE_MIN_FRAME_SZ (280) +#define FNIC_FC_MAX_PAYLOAD_LEN (2048) +#define FNIC_MIN_DATA_FIELD_SIZE (256) + +#define FNIC_FC_EDTOV_NSEC (0x400) +#define FNIC_NSEC_TO_MSEC (0x1000000) +#define FCP_PRLI_FUNC_TARGET (0x0010) + +#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21) +#define FNIC_FC_F_CTL_LAST_END_SEQ (0x98) +#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99) +#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29) +#define FNIC_FC_R_CTL_FC4_SCTL (0x03) +#define FNIC_FC_CS_CTL (0x00) + +#define 
FNIC_FC_FRAME_UNSOLICITED(_fchdr) \ + (_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ) +#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \ + (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA) +#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \ + (_fchdr->fh_r_ctl == FC_RCTL_ELS_REP) +#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ) +#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT) +#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT) +#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \ + (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL) +#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS) +#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS) +#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT) +#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL) + +#define FNIC_FC_C3_RDF (0xfff) +#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \ + (min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \ + (_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF))) +#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \ + (min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \ + (uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff))) + +/* FLOGI/PLOGI struct */ +struct fc_std_flogi { + struct fc_frame_header fchdr; + struct fc_els_flogi els; +} __packed; + +struct fc_std_els_acc_rsp { + struct fc_frame_header fchdr; + struct fc_els_ls_acc acc; +} __packed; + +struct fc_std_els_rjt_rsp { + struct fc_frame_header fchdr; + struct fc_els_ls_rjt rej; +} __packed; + +struct fc_std_els_adisc { + struct fc_frame_header fchdr; + struct fc_els_adisc els; +} __packed; + +struct fc_std_rls_acc { + struct fc_frame_header fchdr; + struct fc_els_rls_resp els; +} __packed; + +struct fc_std_abts_ba_acc { + struct fc_frame_header fchdr; + struct fc_ba_acc acc; +} __packed; + +struct fc_std_abts_ba_rjt { + struct fc_frame_header fchdr; + struct fc_ba_rjt rjt; +} __packed; + +struct fc_std_els_prli { + struct fc_frame_header fchdr; + struct fc_els_prli els_prli; + struct fc_els_spp sp; +} __packed; + +struct fc_std_rpn_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rn_id rpn_id; +} __packed; + +struct fc_std_fdmi_rhba { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_fdmi_rhba rhba; +} __packed; + +struct fc_std_fdmi_rpa { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_fdmi_rpa rpa; +} __packed; + +struct fc_std_rft_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rft_id rft_id; +} __packed; + +struct fc_std_rff_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rff_id rff_id; +} __packed; + +struct fc_std_gpn_ft { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_gid_ft gpn_ft; +} __packed; + +/* Accept CT_IU for GPN_FT */ +struct fc_gpn_ft_rsp_iu { + uint8_t ctrl; + uint8_t fcid[3]; + uint32_t rsvd; + __be64 wwpn; +} __packed; + +struct fc_std_rls { + struct fc_frame_header fchdr; + struct fc_els_rls els; +} __packed; + +struct fc_std_scr { + struct fc_frame_header fchdr; + struct fc_els_scr scr; +} __packed; + +struct fc_std_rscn { + struct fc_frame_header fchdr; + struct fc_els_rscn els; +} __packed; + +struct fc_std_logo { + struct fc_frame_header fchdr; + struct fc_els_logo els; +} __packed; + +#define 
FNIC_ETH_FCOE_HDRS_OFFSET \ + (sizeof(struct ethhdr) + sizeof(struct fcoe_hdr)) + +#endif /* _FDLS_FC_H */ diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c new file mode 100644 index 0000000000000..7bb85949033fe --- /dev/null +++ b/drivers/scsi/fnic/fip.c @@ -0,0 +1,1005 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include "fnic.h" +#include "fip.h" +#include + +#define FIP_FNIC_RESET_WAIT_COUNT 15 + +/** + * fnic_fcoe_reset_vlans - Free up the list of discovered vlans + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan, *next; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlan_list)) { + list_for_each_entry_safe(vlan, next, &fnic->vlan_list, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reset vlan complete\n"); +} + +/** + * fnic_fcoe_send_vlan_req - Send FIP vlan request to all FCFs MAC + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u64 vlan_tov; + struct fip_vlan_req *pvlan_req; + uint16_t frame_size = sizeof(struct fip_vlan_req); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send VLAN req"); + return; + } + + fnic_fcoe_reset_vlans(fnic); + + fnic->set_vlan(fnic, 0); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "set vlan done\n"); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "got MAC 0x%x:%x:%x:%x:%x:%x\n", iport->hwmac[0], + iport->hwmac[1], iport->hwmac[2], iport->hwmac[3], + iport->hwmac[4], iport->hwmac[5]); + + pvlan_req = (struct fip_vlan_req *) frame; + *pvlan_req = (struct fip_vlan_req) { + .eth = {.h_dest = FCOE_ALL_FCFS_MAC, + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = {.fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_VLAN), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_VLAN_REQ_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, + .fip_dlen = 2}} + }; + + memcpy(pvlan_req->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(pvlan_req->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); + + iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Send VLAN req\n"); + fnic_send_fip_frame(iport, frame, frame_size); + + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->retry_fip_timer, round_jiffies(vlan_tov)); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fip timer set\n"); +} + +/** + * fnic_fcoe_process_vlan_resp - Processes the vlan response from one FCF and + * populates VLAN list. + * @fnic: Handle to fnic driver instance + * @fiph: Received FIP frame + * + * Will wait for responses from multiple FCFs until timeout. 
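 *
 * Editor's sketch (not part of this patch): fip_dl_len and each
 * descriptor's fip_dlen count 4-byte words, which is why the loop below
 * advances cur_desc by fip_dlen and indexes the raw buffer in units of
 * 4. A minimal, self-contained walk over such a descriptor list could
 * look like this; fip_desc and FIP_DT_VLAN are the standard
 * scsi/fc/fc_fip.h definitions, the helper name is hypothetical:
 */

#if 0	/* illustrative only, never built */
#include <linux/types.h>
#include <scsi/fc/fc_fip.h>

static int fip_count_vlan_descs(const u8 *desc_list, int dl_words)
{
	int words = 0, nvlan = 0;

	while (words < dl_words) {
		const struct fip_desc *d =
			(const struct fip_desc *)(desc_list + words * 4);

		if (d->fip_dlen == 0)	/* malformed: would loop forever */
			break;
		if (d->fip_dtype == FIP_DT_VLAN)
			nvlan++;
		words += d->fip_dlen;
	}
	return nvlan;
}
#endif

/*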
+ */ +void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *)fiph; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u16 vid; + int num_vlan = 0; + int cur_desc, desc_len; + struct fcoe_vlan *vlan; + struct fip_vlan_desc *vlan_desc; + unsigned long flags; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p got vlan resp\n", fnic); + + desc_len = be16_to_cpu(vlan_notif->fip.fip_dl_len); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "desc_len %d\n", desc_len); + + spin_lock_irqsave(&fnic->vlans_lock, flags); + + cur_desc = 0; + while (desc_len > 0) { + vlan_desc = + (struct fip_vlan_desc *)(((char *)vlan_notif->vlans_desc) + + cur_desc * 4); + + if (vlan_desc->fd_desc.fip_dtype == FIP_DT_VLAN) { + if (vlan_desc->fd_desc.fip_dlen != 1) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Invalid descriptor length (%x) in VLAN response\n", + vlan_desc->fd_desc.fip_dlen); + + } + num_vlan++; + vid = be16_to_cpu(vlan_desc->fd_vlan); + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); /* atomic: vlans_lock held */ + + if (!vlan) { + /* retry from timer */ + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Mem Alloc failure\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlan_list); + break; + } + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Invalid descriptor type (%x) in VLAN response\n", + vlan_desc->fd_desc.fip_dtype); + /* + * Note: received a type=2 descriptor here i.e. FIP + * MAC Address Descriptor + */ + cur_desc += vlan_desc->fd_desc.fip_dlen; + desc_len -= vlan_desc->fd_desc.fip_dlen; + } + + /* any VLAN descriptors present? 
*/ + if (num_vlan == 0) { + atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p No VLAN descriptors in FIP VLAN response\n", + fnic); + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + out: + return; +} + +/** + * fnic_fcoe_start_fcf_discovery - Start FIP FCF discovery in a selected vlan + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_start_fcf_discovery(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + u64 fcs_tov; + struct fip_discovery *pdisc_sol; + uint16_t frame_size = sizeof(struct fip_discovery); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to start FCF discovery"); + return; + } + + memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN); + + pdisc_sol = (struct fip_discovery *) frame; + *pdisc_sol = (struct fip_discovery) { + .eth = {.h_dest = FCOE_ALL_FCFS_MAC, + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), .fip_op = cpu_to_be16(FIP_OP_DISC), + .fip_subcode = FIP_SC_REQ, .fip_dl_len = cpu_to_be16(FIP_DISC_SOL_LEN), + .fip_flags = cpu_to_be16(FIP_FL_FPMA)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}, + .name_desc = {.fd_desc = {.fip_dtype = FIP_DT_NAME, .fip_dlen = 3}}, + .fcoe_desc = {.fd_desc = {.fip_dtype = FIP_DT_FCOE_SIZE, .fip_dlen = 1}, + .fd_size = cpu_to_be16(FCOE_MAX_SIZE)} + }; + + memcpy(pdisc_sol->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(pdisc_sol->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + iport->selected_fcf.fcf_priority = 0xFF; + + FNIC_STD_SET_NODE_NAME(&pdisc_sol->name_desc.fd_wwn, iport->wwnn); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Start FCF discovery\n"); + fnic_send_fip_frame(iport, frame, frame_size); + + iport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED; + + fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV); + mod_timer(&fnic->retry_fip_timer, round_jiffies(fcs_tov)); +} + +/** + * fnic_fcoe_fip_discovery_resp - Processes FCF advertisements. + * @fnic: Handle to fnic driver instance + * @fiph: Received frame + * + * FCF advertisements can be: + * solicited - Sent in response of a discover FCF FIP request + * Store the information of the FCF with highest priority. + * Wait until timeout in case of multiple FCFs. + * + * unsolicited - Sent periodically by the FCF for keep alive. + * If FLOGI is in progress or completed and the advertisement is + * received by our selected FCF, refresh the keep alive timer. 
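 *
 * Editor's sketch (not part of this patch): the solicited/unsolicited
 * split below keys off FIP_FL_SOL in the FIP header flags, and FCF
 * selection keeps the advertisement with the lowest fd_pri (0 is best,
 * hence fcf_priority is seeded with 0xFF). Hedged helpers over the
 * standard scsi/fc/fc_fip.h definitions; the names are hypothetical:
 */

#if 0	/* illustrative only, never built */
#include <scsi/fc/fc_fip.h>

static inline bool fip_adv_is_solicited(const struct fip_header *fiph)
{
	return be16_to_cpu(fiph->fip_flags) & FIP_FL_SOL;
}

static inline bool fip_fcf_is_better(u8 new_pri, u8 cur_pri)
{
	return new_pri < cur_pri;	/* lower value = higher priority */
}
#endif

/*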
+ */ +void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_disc_adv *disc_adv = (struct fip_disc_adv *)fiph; + u64 fcs_ka_tov; + u64 tov; + int fka_has_changed; + + switch (iport->fip.state) { + case FDLS_FIP_FCF_DISCOVERY_STARTED: + if (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p Solicited adv\n", fnic); + + if ((disc_adv->prio_desc.fd_pri < + iport->selected_fcf.fcf_priority) + && (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL)) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p FCF Available\n", fnic); + memcpy(iport->selected_fcf.fcf_mac, + disc_adv->mac_desc.fd_mac, ETH_ALEN); + iport->selected_fcf.fcf_priority = + disc_adv->prio_desc.fd_pri; + iport->selected_fcf.fka_adv_period = + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "adv time %d", + iport->selected_fcf.fka_adv_period); + iport->selected_fcf.ka_disabled = + (disc_adv->fka_adv_desc.fd_flags & 1); + } + } + break; + case FDLS_FIP_FLOGI_STARTED: + case FDLS_FIP_FLOGI_COMPLETE: + if (!(be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL)) { + /* same fcf */ + if (memcmp + (iport->selected_fcf.fcf_mac, + disc_adv->mac_desc.fd_mac, ETH_ALEN) == 0) { + if (iport->selected_fcf.fka_adv_period != + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period)) { + iport->selected_fcf.fka_adv_period = + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); + FNIC_FIP_DBG(KERN_INFO, + fnic->host, + fnic->fnic_num, + "change fka to %d", + iport->selected_fcf.fka_adv_period); + } + + fka_has_changed = + (iport->selected_fcf.ka_disabled == 1) + && ((disc_adv->fka_adv_desc.fd_flags & 1) == + 0); + + iport->selected_fcf.ka_disabled = + (disc_adv->fka_adv_desc.fd_flags & 1); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == + 0))) { + + fcs_ka_tov = jiffies + + 3 + * + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->fcs_ka_timer, + round_jiffies(fcs_ka_tov)); + } else { + if (timer_pending(&fnic->fcs_ka_timer)) + del_timer_sync(&fnic->fcs_ka_timer); + } + + if (fka_has_changed) { + if (iport->selected_fcf.fka_adv_period != 0) { + tov = + jiffies + + msecs_to_jiffies( + iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, + round_jiffies(tov)); + + tov = + jiffies + + msecs_to_jiffies + (FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, + round_jiffies(tov)); + } + } + } + } + break; + default: + break; + } /* end switch */ +} + +/** + * fnic_fcoe_start_flogi - Send FIP FLOGI to the selected FCF + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_start_flogi(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + struct fip_flogi *pflogi_req; + u64 flogi_tov; + uint16_t oxid; + uint16_t frame_size = sizeof(struct fip_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to start FIP FLOGI"); + return; + } + + pflogi_req = (struct fip_flogi *) frame; + *pflogi_req = (struct fip_flogi) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_LS), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_FLOGI_LEN), + .fip_flags = cpu_to_be16(FIP_FL_FPMA)}, + .flogi_desc = { + .fd_desc = {.fip_dtype = FIP_DT_FLOGI, .fip_dlen 
= 36}, + .flogi = { + .fchdr = { + .fh_r_ctl = FC_RCTL_ELS_REQ, + .fh_d_id = {0xFF, 0xFF, 0xFE}, + .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els = { + .fl_cmd = ELS_FLOGI, + .fl_csp = { + .sp_hi_ver = + FNIC_FC_PH_VER_HI, + .sp_lo_ver = + FNIC_FC_PH_VER_LO, + .sp_bb_cred = + cpu_to_be16 + (FNIC_FC_B2B_CREDIT), + .sp_bb_data = + cpu_to_be16 + (FNIC_FC_B2B_RDF_SZ)}, + .fl_cssp[2].cp_class = + cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) + }, + } + }, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} + }; + + memcpy(pflogi_req->eth.h_source, iport->hwmac, ETH_ALEN); + if (iport->usefip) + memcpy(pflogi_req->eth.h_dest, iport->selected_fcf.fcf_mac, + ETH_ALEN); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate OXID to send FIP FLOGI"); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(pflogi_req->flogi_desc.flogi.fchdr, oxid); + + FNIC_STD_SET_NPORT_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwnn, + iport->wwnn); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FIP start FLOGI\n"); + fnic_send_fip_frame(iport, frame, frame_size); + iport->fip.flogi_retry++; + + iport->fip.state = FDLS_FIP_FLOGI_STARTED; + flogi_tov = jiffies + msecs_to_jiffies(fnic->config.flogi_timeout); + mod_timer(&fnic->retry_fip_timer, round_jiffies(flogi_tov)); +} + +/** + * fnic_fcoe_process_flogi_resp - Processes FLOGI response from FCF. + * @fnic: Handle to fnic driver instance + * @fiph: Received frame + * + * If successful save assigned fc_id and MAC, program firmware + * and start fdls discovery, else restart vlan discovery. + */ +void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *)fiph; + int desc_len; + uint32_t s_id; + int frame_type; + uint16_t oxid; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct fc_frame_header *fchdr = &flogi_rsp->rsp_desc.flogi.fchdr; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p FIP FLOGI rsp\n", fnic); + desc_len = be16_to_cpu(flogi_rsp->fip.fip_dl_len); + if (desc_len != 38) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Invalid Descriptor List len (%x). 
Dropping frame\n", + desc_len); + return; + } + + if (!((flogi_rsp->rsp_desc.fd_desc.fip_dtype == 7) + && (flogi_rsp->rsp_desc.fd_desc.fip_dlen == 36)) + || !((flogi_rsp->mac_desc.fd_desc.fip_dtype == 2) + && (flogi_rsp->mac_desc.fd_desc.fip_dlen == 2))) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame invalid type and len mix\n"); + return; + } + + frame_type = fnic_fdls_validate_and_get_frame_type(iport, fchdr); + + s_id = ntoh24(fchdr->fh_s_id); + if ((fchdr->fh_f_ctl[0] != 0x98) + || (fchdr->fh_r_ctl != 0x23) + || (s_id != FC_FID_FLOGI) + || (frame_type != FNIC_FABRIC_FLOGI_RSP) + || (fchdr->fh_type != 0x01)) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping invalid frame: s_id %x F %x R %x t %x OX_ID %x\n", + s_id, fchdr->fh_f_ctl[0], fchdr->fh_r_ctl, + fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); + return; + } + + if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p rsp for pending FLOGI\n", fnic); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + del_timer_sync(&fnic->retry_fip_timer); + + if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN) + && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p FLOGI success\n", fnic); + memcpy(iport->fpma, flogi_rsp->mac_desc.fd_mac, ETH_ALEN); + iport->fcid = + ntoh24(flogi_rsp->rsp_desc.flogi.fchdr.fh_d_id); + + iport->r_a_tov = + be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_r_a_tov); + iport->e_d_tov = + be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_e_d_tov); + memcpy(fnic->iport.fcfmac, iport->selected_fcf.fcf_mac, + ETH_ALEN); + vnic_dev_add_addr(fnic->vdev, flogi_rsp->mac_desc.fd_mac); + + if (fnic_fdls_register_portid(iport, iport->fcid, NULL) + != 0) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p flogi registration failed\n", + fnic); + return; + } + + iport->fip.state = FDLS_FIP_FLOGI_COMPLETE; + iport->state = FNIC_IPORT_STATE_FABRIC_DISC; + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "iport->state:%d\n", + iport->state); + fnic_fdls_disc_start(iport); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0))) { + u64 tov; + + tov = jiffies + + + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, + round_jiffies(tov)); + + tov = + jiffies + + msecs_to_jiffies(FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, + round_jiffies(tov)); + + } + } else { + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + atomic64_inc(&fnic_stats->vlan_stats.flogi_rejects); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + + iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; + } + } +} + +/** + * fnic_common_fip_cleanup - Clean up FCF info and timers in case of + * link down/CVL + * @fnic: Handle to fnic driver instance + */ +void fnic_common_fip_cleanup(struct fnic *fnic) +{ + + struct fnic_iport_s *iport = &fnic->iport; + + if (!iport->usefip) + return; + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fip cleanup\n", fnic); + + iport->fip.state = FDLS_FIP_INIT; + + del_timer_sync(&fnic->retry_fip_timer); + del_timer_sync(&fnic->fcs_ka_timer); + del_timer_sync(&fnic->enode_ka_timer); + del_timer_sync(&fnic->vn_ka_timer); + + if (!is_zero_ether_addr(iport->fpma)) + vnic_dev_del_addr(fnic->vdev, iport->fpma); + + memset(iport->fpma, 0, 
ETH_ALEN); + iport->fcid = 0; + iport->r_a_tov = 0; + iport->e_d_tov = 0; + memset(fnic->iport.fcfmac, 0, ETH_ALEN); + memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN); + iport->selected_fcf.fcf_priority = 0; + iport->selected_fcf.fka_adv_period = 0; + iport->selected_fcf.ka_disabled = 0; + + fnic_fcoe_reset_vlans(fnic); +} + +/** + * fnic_fcoe_process_cvl - Processes Clear Virtual Link from FCF. + * @fnic: Handle to fnic driver instance + * @fiph: Received frame + * + * Verify that cvl is received from our current FCF for our assigned MAC + * and clean up and restart the vlan discovery. + */ +void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_cvl *cvl_msg = (struct fip_cvl *)fiph; + int i; + int found = false; + int max_count = 0; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p clear virtual link handler\n", fnic); + + if (!((cvl_msg->fcf_mac_desc.fd_desc.fip_dtype == 2) + && (cvl_msg->fcf_mac_desc.fd_desc.fip_dlen == 2)) + || !((cvl_msg->name_desc.fd_desc.fip_dtype == 4) + && (cvl_msg->name_desc.fd_desc.fip_dlen == 3))) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "invalid mix: ft %x fl %x ndt %x ndl %x", + cvl_msg->fcf_mac_desc.fd_desc.fip_dtype, + cvl_msg->fcf_mac_desc.fd_desc.fip_dlen, + cvl_msg->name_desc.fd_desc.fip_dtype, + cvl_msg->name_desc.fd_desc.fip_dlen); + } + + if (memcmp + (iport->selected_fcf.fcf_mac, cvl_msg->fcf_mac_desc.fd_mac, ETH_ALEN) + == 0) { + for (i = 0; i < ((be16_to_cpu(fiph->fip_dl_len) / 5) - 1); i++) { + if (!((cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype == 11) + && (cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen == 5))) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "Invalid type and len mix type: %d len: %d\n", + cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype, + cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen); + } + if (memcmp + (iport->fpma, cvl_msg->vn_ports_desc[i].fd_mac, + ETH_ALEN) == 0) { + found = true; + break; + } + } + if (!found) + return; + fnic_common_fip_cleanup(fnic); + + while (fnic->reset_in_progress == IN_PROGRESS) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(5000)); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + max_count++; + if (max_count >= FIP_FNIC_RESET_WAIT_COUNT) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Rthr waited too long. Skipping handle link event %p\n", + fnic); + return; + } + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic reset in progress. 
Link event needs to wait %p", + fnic); + } + fnic->reset_in_progress = IN_PROGRESS; + fnic_fdls_link_down(iport); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); + fnic_fcoe_send_vlan_req(fnic); + } +} + +/** + * fdls_fip_recv_frame - Demultiplexer for FIP frames + * @fnic: Handle to fnic driver instance + * @frame: Received ethernet frame + */ +int fdls_fip_recv_frame(struct fnic *fnic, void *frame) +{ + struct ethhdr *eth = (struct ethhdr *)frame; + struct fip_header *fiph; + u16 op; + u8 sub; + int len = 2048; + + if (be16_to_cpu(eth->h_proto) == ETH_P_FIP) { + fiph = (struct fip_header *)(eth + 1); + op = be16_to_cpu(fiph->fip_op); + sub = fiph->fip_subcode; + + fnic_debug_dump_fip_frame(fnic, eth, len, "Incoming"); + + if (op == FIP_OP_DISC && sub == FIP_SC_REP) + fnic_fcoe_fip_discovery_resp(fnic, fiph); + else if (op == FIP_OP_VLAN && sub == FIP_SC_REP) + fnic_fcoe_process_vlan_resp(fnic, fiph); + else if (op == FIP_OP_CTRL && sub == FIP_SC_REP) + fnic_fcoe_process_cvl(fnic, fiph); + else if (op == FIP_OP_LS && sub == FIP_SC_REP) + fnic_fcoe_process_flogi_resp(fnic, fiph); + + /* Return true if the frame was a FIP frame */ + return true; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Not a FIP Frame"); + return false; +} + +void fnic_work_on_fip_timer(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_timer_work); + struct fnic_iport_s *iport = &fnic->iport; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FIP timeout\n"); + + if (iport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) { + fnic_vlan_discovery_timeout(fnic); + } else if (iport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) { + u8 zmac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 }; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FCF Discovery timeout\n"); + if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) { + + if (iport->flags & FNIC_FIRST_LINK_UP) { + fnic_scsi_fcpio_reset(iport->fnic); + iport->flags &= ~FNIC_FIRST_LINK_UP; + } + + fnic_fcoe_start_flogi(fnic); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0))) { + u64 fcf_tov; + + fcf_tov = jiffies + + 3 + * + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->fcs_ka_timer, + round_jiffies(fcf_tov)); + } + } else { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "FCF Discovery timeout\n"); + fnic_vlan_discovery_timeout(fnic); + } + } else if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI timeout\n"); + if (iport->fip.flogi_retry < fnic->config.flogi_retries) + fnic_fcoe_start_flogi(fnic); + else + fnic_vlan_discovery_timeout(fnic); + } +} + +/** + * fnic_handle_fip_timer - Timeout handler for FIP discover phase. + * @t: Handle to the timer list + * + * Based on the current state, start next phase or restart discovery. + */ +void fnic_handle_fip_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, retry_fip_timer); + + INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer); + queue_work(fnic_fip_queue, &fnic->fip_timer_work); +} + +/** + * fnic_handle_enode_ka_timer - FIP node keep alive. 
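 *
 * Editor's note (summarizing this file, not in the original comment):
 * each FIP keep-alive timer here re-arms itself the same way,
 *
 *	mod_timer(timer, round_jiffies(jiffies + msecs_to_jiffies(ms)));
 *
 * with ms = fka_adv_period for the ENode KA, 3 * fka_adv_period for the
 * FCF keep-alive watchdog, and FIP_VN_KA_PERIOD for the VN port KA.
 *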
+ * @t: Handle to the timer list + */ +void fnic_handle_enode_ka_timer(struct timer_list *t) +{ + uint8_t *frame; + struct fnic *fnic = from_timer(fnic, t, enode_ka_timer); + + struct fnic_iport_s *iport = &fnic->iport; + struct fip_enode_ka *penode_ka; + u64 enode_ka_tov; + uint16_t frame_size = sizeof(struct fip_enode_ka); + + if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) + return; + + if ((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0)) { + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send enode ka"); + return; + } + + penode_ka = (struct fip_enode_ka *) frame; + *penode_ka = (struct fip_enode_ka) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_CTRL), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_ENODE_KA_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} + }; + + memcpy(penode_ka->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(penode_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); + memcpy(penode_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + + FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Handle enode KA timer\n"); + fnic_send_fip_frame(iport, frame, frame_size); + enode_ka_tov = jiffies + + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, round_jiffies(enode_ka_tov)); +} + +/** + * fnic_handle_vn_ka_timer - FIP virtual port keep alive. + * @t: Handle to the timer list + */ +void fnic_handle_vn_ka_timer(struct timer_list *t) +{ + uint8_t *frame; + struct fnic *fnic = from_timer(fnic, t, vn_ka_timer); + + struct fnic_iport_s *iport = &fnic->iport; + struct fip_vn_port_ka *pvn_port_ka; + u64 vn_ka_tov; + uint8_t fcid[3]; + uint16_t frame_size = sizeof(struct fip_vn_port_ka); + + if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) + return; + + if ((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0)) { + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send vn ka"); + return; + } + + pvn_port_ka = (struct fip_vn_port_ka *) frame; + *pvn_port_ka = (struct fip_vn_port_ka) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_CTRL), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_VN_KA_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}, + .vn_port_desc = {.fd_desc = {.fip_dtype = FIP_DT_VN_ID, .fip_dlen = 5}} + }; + + memcpy(pvn_port_ka->eth.h_source, iport->fpma, ETH_ALEN); + memcpy(pvn_port_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); + memcpy(pvn_port_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + memcpy(pvn_port_ka->vn_port_desc.fd_mac, iport->fpma, ETH_ALEN); + hton24(fcid, iport->fcid); + memcpy(pvn_port_ka->vn_port_desc.fd_fc_id, fcid, 3); + FNIC_STD_SET_NPORT_NAME(&pvn_port_ka->vn_port_desc.fd_wwpn, iport->wwpn); + + FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Handle vnport KA timer\n"); + fnic_send_fip_frame(iport, frame, frame_size); + vn_ka_tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, round_jiffies(vn_ka_tov)); +} + +/** + * fnic_vlan_discovery_timeout - Handle vlan discovery timeout + * @fnic: Handle to fnic driver instance + * + * End of VLAN 
discovery or FCF discovery time window. + * Start the FCF discovery if VLAN was never used. + */ +void fnic_vlan_discovery_timeout(struct fnic *fnic) +{ + struct fcoe_vlan *vlan; + struct fnic_iport_s *iport = &fnic->iport; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (!iport->usefip) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlan_list)) { + /* no vlans available, try again */ + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + return; + } + + vlan = list_first_entry(&fnic->vlan_list, struct fcoe_vlan, list); + + if (vlan->state == FIP_VLAN_SENT) { + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlan_list)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + fnic_fcoe_send_vlan_req(fnic); + return; + } + /* check the next vlan */ + vlan = + list_first_entry(&fnic->vlan_list, struct fcoe_vlan, + list); + + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + + } + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + + } else { + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_fcoe_start_fcf_discovery(fnic); +} + +/** + * fnic_work_on_fcs_ka_timer - Handle work on FCS keep alive timer. + * @work: the work queue to be serviced + * + * Finish handling fcs_ka_timer in process context. + * Clean up, bring the link down, and restart all FIP discovery. + */ +void fnic_work_on_fcs_ka_timer(struct work_struct *work) +{ + struct fnic + *fnic = container_of(work, struct fnic, fip_timer_work); + struct fnic_iport_s *iport = &fnic->iport; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fcs ka timeout\n", fnic); + + fnic_common_fip_cleanup(fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic_fdls_link_down(iport); + iport->state = FNIC_IPORT_STATE_FIP; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + + fnic_fcoe_send_vlan_req(fnic); +} + +/** + * fnic_handle_fcs_ka_timer - Handle FCS keep alive timer. + * @t: Handle to the timer list + * + * No keep alives received from FCF. Clean up, bring the link down + * and restart all the FIP discovery. + */ +void fnic_handle_fcs_ka_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer); + + INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer); + queue_work(fnic_fip_queue, &fnic->fip_timer_work); +} diff --git a/drivers/scsi/fnic/fip.h b/drivers/scsi/fnic/fip.h new file mode 100644 index 0000000000000..79fee76288705 --- /dev/null +++ b/drivers/scsi/fnic/fip.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
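 *
 * Editor's sketch (not part of this patch): the FIP_*_LEN values below
 * count 4-byte words of descriptors, not bytes, and each on-wire frame
 * here is just the packed concatenation of the structs that follow. A
 * hedged compile-time check of that layout for the VLAN request:
 */

#if 0	/* illustrative only, never built */
static_assert(sizeof(struct fip_vlan_req) ==
	      sizeof(struct ethhdr) + sizeof(struct fip_header) +
	      sizeof(struct fip_mac_desc));
static_assert(FIP_VLAN_REQ_LEN * 4 == sizeof(struct fip_mac_desc));
#endif

/*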
+ */ +#ifndef _FIP_H_ +#define _FIP_H_ + +#include "fdls_fc.h" +#include "fnic_fdls.h" +#include + +/* Drop the cast from the standard definition */ +#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02} +#define FCOE_MAX_SIZE 0x082E + +#define FCOE_CTLR_FIPVLAN_TOV (3*1000) +#define FCOE_CTLR_FCS_TOV (3*1000) +#define FCOE_CTLR_MAX_SOL (5*1000) + +#define FIP_DISC_SOL_LEN (6) +#define FIP_VLAN_REQ_LEN (2) +#define FIP_ENODE_KA_LEN (2) +#define FIP_VN_KA_LEN (7) +#define FIP_FLOGI_LEN (38) + +enum fdls_vlan_state { + FIP_VLAN_AVAIL, + FIP_VLAN_SENT +}; + +enum fdls_fip_state { + FDLS_FIP_INIT, + FDLS_FIP_VLAN_DISCOVERY_STARTED, + FDLS_FIP_FCF_DISCOVERY_STARTED, + FDLS_FIP_FLOGI_STARTED, + FDLS_FIP_FLOGI_COMPLETE, +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + uint16_t vid; /* vlan ID */ + uint16_t sol_count; /* no. of sols sent */ + uint16_t state; /* state */ +}; + +struct fip_vlan_req { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_vlan_notif { + struct fip_header fip; + struct fip_vlan_desc vlans_desc[]; +} __packed; + +struct fip_vn_port_ka { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; + struct fip_vn_desc vn_port_desc; +} __packed; + +struct fip_enode_ka { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_cvl { + struct fip_header fip; + struct fip_mac_desc fcf_mac_desc; + struct fip_wwn_desc name_desc; + struct fip_vn_desc vn_ports_desc[]; +} __packed; + +struct fip_flogi_desc { + struct fip_desc fd_desc; + uint16_t rsvd; + struct fc_std_flogi flogi; +} __packed; + +struct fip_flogi_rsp_desc { + struct fip_desc fd_desc; + uint16_t rsvd; + struct fc_std_flogi flogi; +} __packed; + +struct fip_flogi { + struct ethhdr eth; + struct fip_header fip; + struct fip_flogi_desc flogi_desc; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_flogi_rsp { + struct fip_header fip; + struct fip_flogi_rsp_desc rsp_desc; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_discovery { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; + struct fip_wwn_desc name_desc; + struct fip_size_desc fcoe_desc; +} __packed; + +struct fip_disc_adv { + struct fip_header fip; + struct fip_pri_desc prio_desc; + struct fip_mac_desc mac_desc; + struct fip_wwn_desc name_desc; + struct fip_fab_desc fabric_desc; + struct fip_fka_desc fka_adv_desc; +} __packed; + +void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_work_on_fip_timer(struct work_struct *work); +void fnic_work_on_fcs_ka_timer(struct work_struct *work); +void fnic_fcoe_send_vlan_req(struct fnic *fnic); +void fnic_fcoe_start_fcf_discovery(struct fnic *fnic); +void fnic_fcoe_start_flogi(struct fnic *fnic); +void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph); +void fnic_vlan_discovery_timeout(struct fnic *fnic); + +extern struct workqueue_struct *fnic_fip_queue; + +#ifdef FNIC_DEBUG +static inline void +fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, + int len, char *pfx) +{ + struct fip_header *fiph = (struct fip_header *)(eth + 1); + u16 op = be16_to_cpu(fiph->fip_op); + u8 sub = fiph->fip_subcode; + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "FIP %s packet contents: op: 0x%x sub: 0x%x (len 
= %d)", + pfx, op, sub, len); + + fnic_debug_dump(fnic, (uint8_t *)eth, len); +} + +#else /* FNIC_DEBUG */ + +static inline void +fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, + int len, char *pfx) {} +#endif /* FNIC_DEBUG */ + +#endif /* _FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 69f373b531328..cb662e6383f44 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -22,8 +22,10 @@ #include #include #include -#include -#include +#include +#include +#include +#include #include "fnic_io.h" #include "fnic_res.h" #include "fnic_trace.h" @@ -36,21 +38,23 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "vnic_scsi.h" +#include "fnic_fdls.h" #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.53" +#define DRV_VERSION "1.8.0.2" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " +#define FABRIC_LOGO_MAX_RETRY 3 #define DESC_CLEAN_LOW_WATERMARK 8 #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ #define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */ #define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ -#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ #define FNIC_DFLT_QUEUE_DEPTH 256 #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ +#define LUN0_DELAY_TIME 9 /* * Tag bits used for special requests. @@ -88,16 +92,100 @@ #define FNIC_DEV_RST_TERM_DONE BIT(20) #define FNIC_DEV_RST_ABTS_PENDING BIT(21) +#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */ +#define FNIC_FCOE_MAX_CMD_LEN 16 +/* Retry supported by rport (returned by PRLI service parameters) */ +#define FNIC_FC_RP_FLAGS_RETRY 0x1 + +/* Cisco vendor id */ +#define PCI_VENDOR_ID_CISCO 0x1137 +#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */ + +/* sereno pcie switch */ +#define PCI_DEVICE_ID_CISCO_SERENO 0x004e +#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */ +#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */ +#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */ + +/* Sereno */ +#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */ +#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */ +#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */ +#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */ +#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */ +#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */ +#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */ + +/* Cruz */ +#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */ +/* Cruz MountTian SIOC */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b +#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */ +/* Cruz MountTian2 SIOC */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157 +#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */ + +/* Bodega */ +/* VIC 1457 PCIe mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218 +#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */ +/* VIC 1487 PCIe mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a +#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */ +/* VIC 1440 Mezz mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215 +#define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */ +#define PCI_SUBDEVICE_ID_CISCO_BENICIA 
0x024a /* VIC 1495 */ +#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */ +#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */ +#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */ +#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */ +#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */ + +/* Beverly */ +#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */ +#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */ +#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */ +#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */ +#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */ +#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */ +#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */ +#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */ +#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */ +#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */ +#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */ + +struct fnic_pcie_device { + u32 device; + u8 *desc; + u32 subsystem_device; + u8 *subsys_desc; +}; + /* - * Usage of the scsi_cmnd scratchpad. + * fnic private data per SCSI command. * These fields are locked by the hashed io_req_lock. */ -#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) -#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) -#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) -#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) -#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) -#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) +struct fnic_cmd_priv { + struct fnic_io_req *io_req; + enum fnic_ioreq_state state; + u32 flags; + u16 abts_status; + u16 lr_status; +}; + +static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) +{ + struct fnic_cmd_priv *fcmd = fnic_priv(cmd); + + return ((u64)fcmd->flags << 32) | fcmd->state; +} #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ @@ -108,7 +196,7 @@ #define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */ #define FNIC_MAX_FCP_TARGET 256 - +#define FNIC_PCI_OFFSET 2 /** * state_flags to identify host state along along with fnic's state **/ @@ -127,8 +215,38 @@ #define fnic_clear_state_flags(fnicp, st_flags) \ __fnic_set_state_flags(fnicp, st_flags, 1) +enum reset_states { + NOT_IN_PROGRESS = 0, + IN_PROGRESS, + RESET_ERROR +}; + +enum rscn_type { + NOT_PC_RSCN = 0, + PC_RSCN +}; + +enum pc_rscn_handling_status { + PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0, + PC_RSCN_HANDLING_IN_PROGRESS +}; + +enum pc_rscn_handling_feature { + PC_RSCN_HANDLING_FEATURE_OFF = 0, + PC_RSCN_HANDLING_FEATURE_ON +}; + +extern unsigned int fnic_fdmi_support; extern unsigned int fnic_log_level; extern unsigned int io_completions; +extern struct workqueue_struct *fnic_event_queue; + +extern unsigned int pc_rscn_handling_feature_flag; +extern spinlock_t reset_fnic_list_lock; +extern struct list_head reset_fnic_list; +extern struct workqueue_struct *reset_fnic_work_queue; +extern struct work_struct reset_fnic_work; + #define FNIC_MAIN_LOGGING 0x01 #define FNIC_FCS_LOGGING 0x02 @@ -143,31 +261,54 @@ do { \ } while (0); \ } while (0) -#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \ +#define FNIC_MAIN_DBG(kern_level, host, fnic_num, fmt, args...) 
\ FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \ - shost_printk(kern_level, host, fmt, ##args);) + shost_printk(kern_level, host, \ + "fnic<%d>: %s: %d: " fmt, fnic_num,\ + __func__, __LINE__, ##args);) -#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \ +#define FNIC_FCS_DBG(kern_level, host, fnic_num, fmt, args...) \ FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ - shost_printk(kern_level, host, fmt, ##args);) + shost_printk(kern_level, host, \ + "fnic<%d>: %s: %d: " fmt, fnic_num,\ + __func__, __LINE__, ##args);) -#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \ +#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ + shost_printk(kern_level, host, \ + "fnic<%d>: %s: %d: " fmt, fnic_num,\ + __func__, __LINE__, ##args);) + +#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \ FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ - shost_printk(kern_level, host, fmt, ##args);) + shost_printk(kern_level, host, \ + "fnic<%d>: %s: %d: " fmt, fnic_num,\ + __func__, __LINE__, ##args);) -#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \ +#define FNIC_ISR_DBG(kern_level, host, fnic_num, fmt, args...) \ FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ - shost_printk(kern_level, host, fmt, ##args);) + shost_printk(kern_level, host, \ + "fnic<%d>: %s: %d: " fmt, fnic_num,\ + __func__, __LINE__, ##args);) #define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \ shost_printk(kern_level, host, fmt, ##args) +#define FNIC_WQ_COPY_MAX 64 +#define FNIC_WQ_MAX 1 +#define FNIC_RQ_MAX 1 +#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX) +#define FNIC_DFLT_IO_COMPLETIONS 256 + +#define FNIC_MQ_CQ_INDEX 2 + extern const char *fnic_state_str[]; enum fnic_intx_intr_index { FNIC_INTX_WQ_RQ_COPYWQ, - FNIC_INTX_ERR, + FNIC_INTX_DUMMY, FNIC_INTX_NOTIFY, + FNIC_INTX_ERR, FNIC_INTX_INTR_MAX, }; @@ -175,7 +316,7 @@ enum fnic_msix_intr_index { FNIC_MSIX_RQ, FNIC_MSIX_WQ, FNIC_MSIX_WQ_COPY, - FNIC_MSIX_ERR_NOTIFY, + FNIC_MSIX_ERR_NOTIFY = FNIC_MSIX_WQ_COPY + FNIC_WQ_COPY_MAX, FNIC_MSIX_INTR_MAX, }; @@ -184,6 +325,7 @@ struct fnic_msix_entry { char devname[IFNAMSIZ + 11]; irqreturn_t (*isr)(int, void *); void *devid; + int irq_num; }; enum fnic_state { @@ -193,30 +335,47 @@ enum fnic_state { FNIC_IN_ETH_TRANS_FC_MODE, }; -#define FNIC_WQ_COPY_MAX 1 -#define FNIC_WQ_MAX 1 -#define FNIC_RQ_MAX 1 -#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX) -#define FNIC_DFLT_IO_COMPLETIONS 256 - struct mempool; +enum fnic_role_e { + FNIC_ROLE_FCP_INITIATOR = 0, +}; + enum fnic_evt { FNIC_EVT_START_VLAN_DISC = 1, FNIC_EVT_START_FCF_DISC = 2, FNIC_EVT_MAX, }; +struct fnic_frame_list { + /* + * Link to frame lists + */ + struct list_head links; + void *fp; + int frame_len; + int rx_ethhdr_stripped; +}; + struct fnic_event { struct list_head list; struct fnic *fnic; enum fnic_evt event; }; +struct fnic_cpy_wq { + unsigned long hw_lock_flags; + u16 active_ioreq_count; + u16 ioreq_table_size; + ____cacheline_aligned struct fnic_io_req **io_req_table; +}; + /* Per-instance private data structure */ struct fnic { - struct fc_lport *lport; - struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ + int fnic_num; + enum fnic_role_e role; + struct fnic_iport_s iport; + struct Scsi_Host *host; struct vnic_dev_bar bar0; struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; @@ -235,6 +394,10 @@ struct fnic { unsigned int wq_count; unsigned int cq_count; + struct completion reset_completion_wait; + struct mutex sgreset_mutex; + spinlock_t sgreset_lock; /* lock for sgreset */ + 
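/* editor's note (inferred from usage): dummy command carried by the sg_reset-initiated device reset path */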
struct scsi_cmnd *sgreset_sc; struct dentry *fnic_stats_debugfs_host; struct dentry *fnic_stats_debugfs_file; struct dentry *fnic_reset_debugfs_file; @@ -245,25 +408,27 @@ struct fnic { u32 vlan_hw_insert:1; /* let hw insert the tag */ u32 in_remove:1; /* fnic device in removal */ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ - u32 link_events:1; /* set when we get any link event*/ - - struct completion *remove_wait; /* device remove thread blocks */ + struct completion *fw_reset_done; + u32 reset_in_progress; atomic_t in_flight; /* io counter */ bool internal_reset_inprogress; u32 _reserved; /* fill hole */ unsigned long state_flags; /* protected by host lock */ enum fnic_state state; spinlock_t fnic_lock; + unsigned long lock_flags; u16 vlan_id; /* VLAN tag including priority */ u8 data_src_addr[ETH_ALEN]; u64 fcp_input_bytes; /* internal statistic */ u64 fcp_output_bytes; /* internal statistic */ u32 link_down_cnt; + u32 soft_reset_count; int link_status; struct list_head list; + struct list_head links; struct pci_dev *pdev; struct vnic_fc_config config; struct vnic_dev *vdev; @@ -278,27 +443,40 @@ struct fnic { struct fnic_host_tag *tags; mempool_t *io_req_pool; mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES]; - spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */ + unsigned int copy_wq_base; struct work_struct link_work; struct work_struct frame_work; - struct sk_buff_head frame_queue; - struct sk_buff_head tx_queue; + struct work_struct flush_work; + struct list_head frame_queue; + struct list_head tx_queue; + mempool_t *frame_pool; + mempool_t *frame_elem_pool; + struct work_struct tport_work; + struct list_head tport_event_list; + + char subsys_desc[14]; + int subsys_desc_len; + int pc_rscn_handling_status; /*** FIP related data members -- start ***/ void (*set_vlan)(struct fnic *, u16 vlan); struct work_struct fip_frame_work; - struct sk_buff_head fip_frame_queue; + struct work_struct fip_timer_work; + struct list_head fip_frame_queue; struct timer_list fip_timer; - struct list_head vlans; spinlock_t vlans_lock; - - struct work_struct event_work; - struct list_head evlist; + struct timer_list retry_fip_timer; + struct timer_list fcs_ka_timer; + struct timer_list enode_ka_timer; + struct timer_list vn_ka_timer; + struct list_head vlan_list; /*** FIP related data members -- end ***/ /* copy work queue cache line section */ - ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; + ____cacheline_aligned struct vnic_wq_copy hw_copy_wq[FNIC_WQ_COPY_MAX]; + ____cacheline_aligned struct fnic_cpy_wq sw_copy_wq[FNIC_WQ_COPY_MAX]; + /* completion queue cache line section */ ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX]; @@ -315,44 +493,40 @@ struct fnic { ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; }; -static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) -{ - return container_of(fip, struct fnic, ctlr); -} - extern struct workqueue_struct *fnic_event_queue; extern struct workqueue_struct *fnic_fip_queue; extern struct device_attribute *fnic_attrs[]; void fnic_clear_intr_mode(struct fnic *fnic); int fnic_set_intr_mode(struct fnic *fnic); +int fnic_set_intr_mode_msix(struct fnic *fnic); void fnic_free_intr(struct fnic *fnic); int fnic_request_intr(struct fnic *fnic); -int fnic_send(struct fc_lport *, struct fc_frame *); void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); void fnic_handle_frame(struct work_struct *work); +void fnic_tport_event_handler(struct work_struct *work); void 
fnic_handle_link(struct work_struct *work); void fnic_handle_event(struct work_struct *work); +void fdls_reclaim_oxid_handler(struct work_struct *work); +void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid); +void fdls_schedule_oxid_free_retry_work(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); -void fnic_flush_tx(struct fnic *); -void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); -void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); -void fnic_update_mac(struct fc_lport *, u8 *new); +void fnic_flush_tx(struct work_struct *work); void fnic_update_mac_locked(struct fnic *, u8 *new); int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); int fnic_abort_cmd(struct scsi_cmnd *); int fnic_device_reset(struct scsi_cmnd *); -int fnic_host_reset(struct scsi_cmnd *); -int fnic_reset(struct Scsi_Host *); -void fnic_scsi_cleanup(struct fc_lport *); -void fnic_scsi_abort_io(struct fc_lport *); -void fnic_empty_scsi_cleanup(struct fc_lport *); -void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); -int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int); +int fnic_eh_host_reset_handler(struct scsi_cmnd *sc); +int fnic_host_reset(struct Scsi_Host *shost); +void fnic_reset(struct Scsi_Host *shost); +int fnic_issue_fc_host_lip(struct Scsi_Host *shost); +void fnic_get_host_port_state(struct Scsi_Host *shost); +void fnic_scsi_fcpio_reset(struct fnic *fnic); +int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index); int fnic_wq_cmpl_handler(struct fnic *fnic, int); int fnic_flogi_reg_handler(struct fnic *fnic, u32); void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, @@ -360,17 +534,18 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, int fnic_fw_reset_handler(struct fnic *fnic); void fnic_terminate_rport_io(struct fc_rport *); const char *fnic_state_to_str(unsigned int state); - +void fnic_mq_map_queues_cpus(struct Scsi_Host *host); void fnic_log_q_error(struct fnic *fnic); void fnic_handle_link_event(struct fnic *fnic); - +int fnic_stats_debugfs_init(struct fnic *fnic); +void fnic_stats_debugfs_remove(struct fnic *fnic); int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); void fnic_handle_fip_frame(struct work_struct *work); +void fnic_reset_work_handler(struct work_struct *work); void fnic_handle_fip_event(struct fnic *fnic); void fnic_fcoe_reset_vlans(struct fnic *fnic); -void fnic_fcoe_evlist_free(struct fnic *fnic); -extern void fnic_handle_fip_timer(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct timer_list *t); static inline int fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) @@ -379,4 +554,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) } void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +void fnic_free_txq(struct list_head *head); +int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, + char **subsys_desc); +void fnic_fdls_link_status_change(struct fnic *fnic, int linkup); +void fnic_delete_fcp_tports(struct fnic *fnic); +void fnic_flush_tport_event_list(struct fnic *fnic); +int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid); +unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid); +unsigned int fnic_count_all_ioreqs(struct fnic *fnic); +unsigned 
int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq, + struct scsi_device *device); +unsigned int fnic_count_lun_ioreqs(struct fnic *fnic, + struct scsi_device *device); +void fnic_scsi_unload(struct fnic *fnic); +void fnic_scsi_unload_cleanup(struct fnic *fnic); +int fnic_get_debug_info(struct stats_debug_info *info, + struct fnic *fnic); + +struct fnic_scsi_iter_data { + struct fnic *fnic; + void *data1; + void *data2; + bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2); +}; + +static inline bool +fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) +{ + struct fnic_scsi_iter_data *iter = iter_data; + + return iter->fn(iter->fnic, sc, iter->data1, iter->data2); +} + +static inline void +fnic_scsi_io_iter(struct fnic *fnic, + bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2), + void *data1, void *data2) +{ + struct fnic_scsi_iter_data iter_data = { + .fn = fn, + .fnic = fnic, + .data1 = data1, + .data2 = data2, + }; + scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data); +} + +#ifdef FNIC_DEBUG +static inline void +fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) +{ + int i; + + for (i = 0; i < len; i = i+8) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, + u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], + u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); + } +} + +static inline void +fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, + int len, char *pfx) +{ + uint32_t s_id, d_id; + + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", + pfx, s_id, d_id, fchdr->fh_type, + FNIC_STD_GET_OX_ID(fchdr), len); + + fnic_debug_dump(fnic, (uint8_t *)fchdr, len); + +} +#else /* FNIC_DEBUG */ +static inline void +fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {} +static inline void +fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, + uint32_t len, char *pfx) {} +#endif /* FNIC_DEBUG */ #endif /* _FNIC_H_ */ diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c index aea0c3becfd45..f6fef1c02ef24 100644 --- a/drivers/scsi/fnic/fnic_attrs.c +++ b/drivers/scsi/fnic/fnic_attrs.c @@ -23,25 +23,28 @@ static ssize_t fnic_show_state(struct device *dev, struct device_attribute *attr, char *buf) { - struct fc_lport *lp = shost_priv(class_to_shost(dev)); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = + *((struct fnic **) shost_priv(class_to_shost(dev))); - return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]); + return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]); } static ssize_t fnic_show_drv_version(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); + return sysfs_emit(buf, "%s\n", DRV_VERSION); } static ssize_t fnic_show_link_state(struct device *dev, struct device_attribute *attr, char *buf) { - struct fc_lport *lp = shost_priv(class_to_shost(dev)); + struct fnic *fnic = + *((struct fnic **) shost_priv(class_to_shost(dev))); - return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up) - ? "Link Up" : "Link Down"); + return sysfs_emit(buf, "%s\n", + ((fnic->iport.state != FNIC_IPORT_STATE_INIT) && + (fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ? 
+ "Link Up" : "Link Down"); } static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index e7326505cabb1..bd0c4f0fe201f 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -21,6 +21,9 @@ #include #include "fnic.h" +extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer, + struct fnic *fnic); + static struct dentry *fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; @@ -66,9 +69,10 @@ int fnic_debugfs_init(void) fc_trc_flag->fnic_trace = 2; fc_trc_flag->fc_trace = 3; fc_trc_flag->fc_clear = 4; + return 0; } - return 0; + return -ENOMEM; } /* @@ -86,8 +90,7 @@ void fnic_debugfs_terminate(void) debugfs_remove(fnic_trace_debugfs_root); fnic_trace_debugfs_root = NULL; - if (fc_trc_flag) - vfree(fc_trc_flag); + vfree(fc_trc_flag); } /* @@ -216,25 +219,21 @@ static int fnic_trace_debugfs_open(struct inode *inode, return -ENOMEM; if (*rdata_ptr == fc_trc_flag->fnic_trace) { - fnic_dbg_prt->buffer = vmalloc(array3_size(3, trace_max_pages, + fnic_dbg_prt->buffer = vzalloc(array3_size(3, trace_max_pages, PAGE_SIZE)); if (!fnic_dbg_prt->buffer) { kfree(fnic_dbg_prt); return -ENOMEM; } - memset((void *)fnic_dbg_prt->buffer, 0, - 3 * (trace_max_pages * PAGE_SIZE)); fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); } else { fnic_dbg_prt->buffer = - vmalloc(array3_size(3, fnic_fc_trace_max_pages, + vzalloc(array3_size(3, fnic_fc_trace_max_pages, PAGE_SIZE)); if (!fnic_dbg_prt->buffer) { kfree(fnic_dbg_prt); return -ENOMEM; } - memset((void *)fnic_dbg_prt->buffer, 0, - 3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); fnic_dbg_prt->buffer_len = fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr); } @@ -611,6 +610,7 @@ static int fnic_stats_debugfs_open(struct inode *inode, debug->buf_size = buf_size; memset((void *)debug->debug_buffer, 0, buf_size); debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); + debug->buffer_len += fnic_get_debug_info(debug, fnic); file->private_data = debug; @@ -691,26 +691,25 @@ static const struct file_operations fnic_reset_debugfs_fops = { * It will create file stats and reset_stats under statistics/host# directory * to log per fnic stats. 
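The debugfs hunks above replace each vmalloc()-then-memset() pair with a single vzalloc(), which returns already-zeroed memory, and keep array3_size() so the size multiplication stays overflow-checked. A minimal userspace sketch of the same simplification, with calloc() standing in for vzalloc() (buffer names and sizes are illustrative only):

#include <stdlib.h>
#include <string.h>

/* Before: allocate, then zero by hand. */
static void *trace_buf_old(size_t pages, size_t page_size)
{
    void *buf = malloc(3 * pages * page_size);
    if (!buf)
        return NULL;
    memset(buf, 0, 3 * pages * page_size);
    return buf;
}

/* After: one call that hands back zeroed memory, like vzalloc().
 * calloc() also overflow-checks its multiplication, playing the role
 * that array3_size() plays in the kernel hunk. */
static void *trace_buf_new(size_t pages, size_t page_size)
{
    return calloc(3 * pages, page_size);
}

int main(void)
{
    free(trace_buf_old(4, 4096));
    free(trace_buf_new(4, 4096));
    return 0;
}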
*/ -void fnic_stats_debugfs_init(struct fnic *fnic) +int fnic_stats_debugfs_init(struct fnic *fnic) { char name[16]; - snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); + snprintf(name, sizeof(name), "host%d", fnic->host->host_no); fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, fnic_stats_debugfs_root); - fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_stats_debugfs_fops); - fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_reset_debugfs_fops); + return 0; } /* diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 1885218f9d157..02d15f43e1b97 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -26,694 +26,379 @@ #include #include #include -#include #include -#include +#include +#include #include "fnic_io.h" #include "fnic.h" -#include "fnic_fip.h" +#include "fnic_fdls.h" +#include "fdls_fc.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +#include "fip.h" + +#define MAX_RESET_WAIT_COUNT 64 -static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; -struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; -static void fnic_set_eth_mode(struct fnic *); -static void fnic_fcoe_send_vlan_req(struct fnic *fnic); -static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); -static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); -static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); -static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); +static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; -void fnic_handle_link(struct work_struct *work) +/* + * Internal Functions + * This function will initialize the src_mac address to be + * used in outgoing frames + */ +static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, + uint8_t *src_mac) { - struct fnic *fnic = container_of(work, struct fnic, link_work); - unsigned long flags; - int old_link_status; - u32 old_link_down_cnt; - u64 old_port_speed, new_port_speed; - - spin_lock_irqsave(&fnic->fnic_lock, flags); - - fnic->link_events = 1; /* less work to just set everytime*/ - - if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - - old_link_down_cnt = fnic->link_down_cnt; - old_link_status = fnic->link_status; - old_port_speed = atomic64_read( - &fnic->fnic_stats.misc_stats.current_port_speed); - - fnic->link_status = vnic_dev_link_status(fnic->vdev); - fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); - - new_port_speed = vnic_dev_port_speed(fnic->vdev); - atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, - new_port_speed); - if (old_port_speed != new_port_speed) - FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, - "Current vnic speed set to : %llu\n", - new_port_speed); - - switch (vnic_dev_port_speed(fnic->vdev)) { - case DCEM_PORTSPEED_10G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; - break; - case DCEM_PORTSPEED_20G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; - break; - case DCEM_PORTSPEED_25G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; - break; - case DCEM_PORTSPEED_40G: - case DCEM_PORTSPEED_4x10G: - fc_host_speed(fnic->lport->host) = 
FC_PORTSPEED_40GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; - break; - case DCEM_PORTSPEED_100G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; - break; - default: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; - fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; - break; - } - - if (old_link_status == fnic->link_status) { - if (!fnic->link_status) { - /* DOWN -> DOWN */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_LE, "Link Status: DOWN->DOWN", - strlen("Link Status: DOWN->DOWN")); - } else { - if (old_link_down_cnt != fnic->link_down_cnt) { - /* UP -> DOWN -> UP */ - fnic->lport->host_stats.link_failure_count++; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_fc_trace_set_data( - fnic->lport->host->host_no, - FNIC_FC_LE, - "Link Status:UP_DOWN_UP", - strlen("Link_Status:UP_DOWN_UP") - ); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "link down\n"); - fcoe_ctlr_link_down(&fnic->ctlr); - if (fnic->config.flags & VFCF_FIP_CAPABLE) { - /* start FCoE VLAN discovery */ - fnic_fc_trace_set_data( - fnic->lport->host->host_no, - FNIC_FC_LE, - "Link Status: UP_DOWN_UP_VLAN", - strlen( - "Link Status: UP_DOWN_UP_VLAN") - ); - fnic_fcoe_send_vlan_req(fnic); - return; - } - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "link up\n"); - fcoe_ctlr_link_up(&fnic->ctlr); - } else { - /* UP -> UP */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_fc_trace_set_data( - fnic->lport->host->host_no, FNIC_FC_LE, - "Link Status: UP_UP", - strlen("Link Status: UP_UP")); - } - } - } else if (fnic->link_status) { - /* DOWN -> UP */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (fnic->config.flags & VFCF_FIP_CAPABLE) { - /* start FCoE VLAN discovery */ - fnic_fc_trace_set_data( - fnic->lport->host->host_no, - FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", - strlen("Link Status: DOWN_UP_VLAN")); - fnic_fcoe_send_vlan_req(fnic); - return; - } - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); - fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, - "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); - fcoe_ctlr_link_up(&fnic->ctlr); - } else { - /* UP -> DOWN */ - fnic->lport->host_stats.link_failure_count++; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); - fnic_fc_trace_set_data( - fnic->lport->host->host_no, FNIC_FC_LE, - "Link Status: UP_DOWN", - strlen("Link Status: UP_DOWN")); - if (fnic->config.flags & VFCF_FIP_CAPABLE) { - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "deleting fip-timer during link-down\n"); - del_timer_sync(&fnic->fip_timer); - } - fcoe_ctlr_link_down(&fnic->ctlr); - } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x", + src_mac[0], src_mac[1], src_mac[2], src_mac[3], + src_mac[4], src_mac[5]); + memcpy(fnic->iport.fpma, src_mac, 6); } /* - * This function passes incoming fabric frames to libFC + * This function will initialize the dst_mac address to be + * used in outgoing frames */ -void fnic_handle_frame(struct work_struct *work) +static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic, + uint8_t *dst_mac) { - struct fnic *fnic = container_of(work, struct fnic, frame_work); - struct fc_lport *lp = fnic->lport; - unsigned long flags; - struct sk_buff *skb; - struct fc_frame *fp; - - while ((skb = skb_dequeue(&fnic->frame_queue))) { + 
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x", + dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], + dst_mac[4], dst_mac[5]); - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb(skb); - return; - } - fp = (struct fc_frame *)skb; - - /* - * If we're in a transitional state, just re-queue and return. - * The queue will be serviced when we get to a stable state. - */ - if (fnic->state != FNIC_IN_FC_MODE && - fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_head(&fnic->frame_queue, skb); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - fc_exch_recv(lp, fp); - } + memcpy(fnic->iport.fcfmac, dst_mac, 6); } -void fnic_fcoe_evlist_free(struct fnic *fnic) +void fnic_get_host_port_state(struct Scsi_Host *shost) { - struct fnic_event *fevt = NULL; - struct fnic_event *next = NULL; + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + struct fnic_iport_s *iport = &fnic->iport; unsigned long flags; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (list_empty(&fnic->evlist)) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - - list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { - list_del(&fevt->list); - kfree(fevt); - } + if (!fnic->link_status) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + else if (iport->state == FNIC_IPORT_STATE_READY) + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + else + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; spin_unlock_irqrestore(&fnic->fnic_lock, flags); } -void fnic_handle_event(struct work_struct *work) +void fnic_fdls_link_status_change(struct fnic *fnic, int linkup) { - struct fnic *fnic = container_of(work, struct fnic, event_work); - struct fnic_event *fevt = NULL; - struct fnic_event *next = NULL; - unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (list_empty(&fnic->evlist)) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "link up: %d, usefip: %d", linkup, iport->usefip); - list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { - if (fnic->stop_rx_link_events) { - list_del(&fevt->list); - kfree(fevt); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - /* - * If we're in a transitional state, just re-queue and return. - * The queue will be serviced when we get to a stable state. 
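fnic_get_host_port_state() above folds two driver-side facts into the transport-visible state: a down link always reports FC_PORTSTATE_LINKDOWN, and with the link up only a fully logged-in iport (FNIC_IPORT_STATE_READY) reports FC_PORTSTATE_ONLINE; everything in between is OFFLINE. A compilable sketch of that mapping (enum names abbreviated; this illustrates the logic, it is not the driver's code):

#include <stdio.h>

enum iport_state { IPORT_INIT, IPORT_LINK_WAIT, IPORT_FIP, IPORT_FABRIC_DISC, IPORT_READY };
enum host_port_state { PORT_LINKDOWN, PORT_OFFLINE, PORT_ONLINE };

static enum host_port_state port_state(int link_up, enum iport_state s)
{
    if (!link_up)
        return PORT_LINKDOWN;   /* physical link beats login state */
    return s == IPORT_READY ? PORT_ONLINE : PORT_OFFLINE;
}

int main(void)
{
    /* Link up but still in fabric discovery: reported OFFLINE. */
    printf("%d\n", port_state(1, IPORT_FABRIC_DISC));
    return 0;
}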
- */ - if (fnic->state != FNIC_IN_FC_MODE && - fnic->state != FNIC_IN_ETH_MODE) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); - list_del(&fevt->list); - switch (fevt->event) { - case FNIC_EVT_START_VLAN_DISC: - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (linkup) { + if (iport->usefip) { + iport->state = FNIC_IPORT_STATE_FIP; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "link up: %d, usefip: %d", linkup, iport->usefip); fnic_fcoe_send_vlan_req(fnic); - spin_lock_irqsave(&fnic->fnic_lock, flags); - break; - case FNIC_EVT_START_FCF_DISC: - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "Start FCF Discovery\n"); - fnic_fcoe_start_fcf_disc(fnic); - break; - default: - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "Unknown event 0x%x\n", fevt->event); - break; + } else { + iport->state = FNIC_IPORT_STATE_FABRIC_DISC; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport->state: %d", iport->state); + fnic_fdls_disc_start(iport); } - kfree(fevt); + } else { + iport->state = FNIC_IPORT_STATE_LINK_WAIT; + if (!is_zero_ether_addr(iport->fpma)) + vnic_dev_del_addr(fnic->vdev, iport->fpma); + fnic_common_fip_cleanup(fnic); + fnic_fdls_link_down(iport); + } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } -/** - * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected - * @fip: The FCoE controller that received the frame - * @skb: The received FIP frame - * - * Returns non-zero if the frame is rejected with unsupported cmd with - * insufficient resource els explanation. + +/* + * FPMA can be either taken from ethhdr(dst_mac) or flogi resp + * or derive from FC_MAP and FCID combination. While it should be + * same, revisit this if there is any possibility of not-correct. 
*/ -static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, - struct sk_buff *skb) +void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, + uint8_t *fcid) { - struct fc_lport *lport = fip->lp; - struct fip_header *fiph; - struct fc_frame_header *fh = NULL; - struct fip_desc *desc; - struct fip_encaps *els; - u16 op; - u8 els_op; - u8 sub; - - size_t rlen; - size_t dlen = 0; - - if (skb_linearize(skb)) - return 0; + struct fnic *fnic = iport->fnic; + struct ethhdr *ethhdr = (struct ethhdr *) rx_frame; + uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 }; - if (skb->len < sizeof(*fiph)) - return 0; + memcpy(&fcmac[3], fcid, 3); - fiph = (struct fip_header *)skb->data; - op = ntohs(fiph->fip_op); - sub = fiph->fip_subcode; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x", + ethhdr->h_dest[0], ethhdr->h_dest[1], + ethhdr->h_dest[2], ethhdr->h_dest[3], + ethhdr->h_dest[4], ethhdr->h_dest[5]); - if (op != FIP_OP_LS) - return 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x", + fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], + fcmac[5]); - if (sub != FIP_SC_REP) - return 0; - - rlen = ntohs(fiph->fip_dl_len) * 4; - if (rlen + sizeof(*fiph) > skb->len) - return 0; + fnic_fdls_set_fcoe_srcmac(fnic, fcmac); + fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source); +} - desc = (struct fip_desc *)(fiph + 1); - dlen = desc->fip_dlen * FIP_BPW; +void fnic_fdls_init(struct fnic *fnic, int usefip) +{ + struct fnic_iport_s *iport = &fnic->iport; - if (desc->fip_dtype == FIP_DT_FLOGI) { + /* Initialize iPort structure */ + iport->state = FNIC_IPORT_STATE_INIT; + iport->fnic = fnic; + iport->usefip = usefip; - if (dlen < sizeof(*els) + sizeof(*fh) + 1) - return 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x", + iport->hwmac[0], iport->hwmac[1], iport->hwmac[2], + iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]); - els = (struct fip_encaps *)desc; - fh = (struct fc_frame_header *)(els + 1); + INIT_LIST_HEAD(&iport->tport_list); + INIT_LIST_HEAD(&iport->tport_list_pending_del); - if (!fh) - return 0; - - /* - * ELS command code, reason and explanation should be = Reject, - * unsupported command and insufficient resource - */ - els_op = *(u8 *)(fh + 1); - if (els_op == ELS_LS_RJT) { - shost_printk(KERN_INFO, lport->host, - "Flogi Request Rejected by Switch\n"); - return 1; - } - shost_printk(KERN_INFO, lport->host, - "Flogi Request Accepted by Switch\n"); - } - return 0; + fnic_fdls_disc_init(iport); } -static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +void fnic_handle_link(struct work_struct *work) { - struct fcoe_ctlr *fip = &fnic->ctlr; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - struct sk_buff *skb; - char *eth_fr; - struct fip_vlan *vlan; - u64 vlan_tov; + struct fnic *fnic = container_of(work, struct fnic, link_work); + int old_link_status; + u32 old_link_down_cnt; + int max_count = 0; - fnic_fcoe_reset_vlans(fnic); - fnic->set_vlan(fnic, 0); + if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Interrupt mode is not MSI\n"); - if (printk_ratelimit()) - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, - "Sending VLAN request...\n"); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); - skb = dev_alloc_skb(sizeof(struct fip_vlan)); - if (!skb) + if (fnic->stop_rx_link_events) { + 
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Stop link rx events\n"); return; - - eth_fr = (char *)skb->data; - vlan = (struct fip_vlan *)eth_fr; - - memset(vlan, 0, sizeof(*vlan)); - memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); - memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); - vlan->eth.h_proto = htons(ETH_P_FIP); - - vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); - vlan->fip.fip_op = htons(FIP_OP_VLAN); - vlan->fip.fip_subcode = FIP_SC_VL_REQ; - vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); - - vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; - vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; - memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); - - vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; - vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; - put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); - atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); - - skb_put(skb, sizeof(*vlan)); - skb->protocol = htons(ETH_P_FIP); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - fip->send(fip, skb); - - /* set a timer so that we can retry if there no response */ - vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); - mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); -} - -static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) -{ - struct fcoe_ctlr *fip = &fnic->ctlr; - struct fip_header *fiph; - struct fip_desc *desc; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - u16 vid; - size_t rlen; - size_t dlen; - struct fcoe_vlan *vlan; - u64 sol_time; - unsigned long flags; - - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, - "Received VLAN response...\n"); - - fiph = (struct fip_header *) skb->data; - - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, - "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", - ntohs(fiph->fip_op), fiph->fip_subcode); - - rlen = ntohs(fiph->fip_dl_len) * 4; - fnic_fcoe_reset_vlans(fnic); - spin_lock_irqsave(&fnic->vlans_lock, flags); - desc = (struct fip_desc *)(fiph + 1); - while (rlen > 0) { - dlen = desc->fip_dlen * FIP_BPW; - switch (desc->fip_dtype) { - case FIP_DT_VLAN: - vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); - shost_printk(KERN_INFO, fnic->lport->host, - "process_vlan_resp: FIP VLAN %d\n", vid); - vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); - if (!vlan) { - /* retry from timer */ - spin_unlock_irqrestore(&fnic->vlans_lock, - flags); - goto out; - } - vlan->vid = vid & 0x0fff; - vlan->state = FIP_VLAN_AVAIL; - list_add_tail(&vlan->list, &fnic->vlans); - break; - } - desc = (struct fip_desc *)((char *)desc + dlen); - rlen -= dlen; } - /* any VLAN descriptors present ? */ - if (list_empty(&fnic->vlans)) { - /* retry from timer */ - atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, - "No VLAN descriptors in FIP VLAN response\n"); - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - goto out; + /* Do not process if the fnic is already in transitional state */ + if ((fnic->state != FNIC_IN_ETH_MODE) + && (fnic->state != FNIC_IN_FC_MODE)) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic in transitional state: %d. 
link up: %d ignored", + fnic->state, vnic_dev_link_status(fnic->vdev)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Current link status: %d iport state: %d\n", + fnic->link_status, fnic->iport.state); + return; } - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - vlan->sol_count++; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - - /* start the solicitation */ - fcoe_ctlr_link_up(fip); - - sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); -out: - return; -} - -static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) -{ - unsigned long flags; - struct fcoe_vlan *vlan; - u64 sol_time; - - spin_lock_irqsave(&fnic->vlans_lock, flags); - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - vlan->sol_count = 1; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - - /* start the solicitation */ - fcoe_ctlr_link_up(&fnic->ctlr); - - sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); -} - -static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) -{ - unsigned long flags; - struct fcoe_vlan *fvlan; + old_link_down_cnt = fnic->link_down_cnt; + old_link_status = fnic->link_status; + fnic->link_status = vnic_dev_link_status(fnic->vdev); + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (list_empty(&fnic->vlans)) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return -EINVAL; + while (fnic->reset_in_progress == IN_PROGRESS) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic reset in progress. Link event needs to wait\n"); + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "waiting for reset completion\n"); + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(5000)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "woken up from reset completion wait\n"); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + max_count++; + if (max_count >= MAX_RESET_WAIT_COUNT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Rstth waited for too long. 
Skipping handle link event\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } } - - fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - if (fvlan->state == FIP_VLAN_USED) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Marking fnic reset in progress\n"); + fnic->reset_in_progress = IN_PROGRESS; + + if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) || + (fnic->link_status != old_link_status)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "old link status: %d link status: %d\n", + old_link_status, (int) fnic->link_status); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "old down count %d down count: %d\n", + old_link_down_cnt, (int) fnic->link_down_cnt); } - if (fvlan->state == FIP_VLAN_SENT) { - fvlan->state = FIP_VLAN_USED; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return 0; + if (old_link_status == fnic->link_status) { + if (!fnic->link_status) { + /* DOWN -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->down\n"); + } else { + if (old_link_down_cnt != fnic->link_down_cnt) { + /* UP -> DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->down. Link down\n"); + fnic_fdls_link_status_change(fnic, 0); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->up. Link up\n"); + fnic_fdls_link_status_change(fnic, 1); + } else { + /* UP -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->up\n"); + } + } + } else if (fnic->link_status) { + /* DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->up. Link up\n"); + fnic_fdls_link_status_change(fnic, 1); + } else { + /* UP -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->down. 
Link down\n"); + fnic_fdls_link_status_change(fnic, 0); } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return -EINVAL; -} - -static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) -{ - struct fnic_event *fevt; - unsigned long flags; - - fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); - if (!fevt) - return; - fevt->fnic = fnic; - fevt->event = ev; - - spin_lock_irqsave(&fnic->fnic_lock, flags); - list_add_tail(&fevt->list, &fnic->evlist); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); - schedule_work(&fnic->event_work); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Marking fnic reset completion\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } -static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +void fnic_handle_frame(struct work_struct *work) { - struct fip_header *fiph; - int ret = 1; - u16 op; - u8 sub; + struct fnic *fnic = container_of(work, struct fnic, frame_work); + struct fnic_frame_list *cur_frame, *next; + int fchdr_offset = 0; - if (!skb || !(skb->data)) - return -1; + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) { + if (fnic->stop_rx_link_events) { + list_del(&cur_frame->links); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + kfree(cur_frame->fp); + mempool_free(cur_frame, fnic->frame_elem_pool); + return; + } - if (skb_linearize(skb)) - goto drop; + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Cannot process frame in transitional state\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } - fiph = (struct fip_header *)skb->data; - op = ntohs(fiph->fip_op); - sub = fiph->fip_subcode; + list_del(&cur_frame->links); - if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) - goto drop; + /* Frames from FCP_RQ will have ethhdrs stripped off */ + fchdr_offset = (cur_frame->rx_ethhdr_stripped) ? 
+ 0 : FNIC_ETH_FCOE_HDRS_OFFSET; - if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) - goto drop; + fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp, + cur_frame->frame_len, fchdr_offset); - if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { - if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) - goto drop; - /* pass it on to fcoe */ - ret = 1; - } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { - /* set the vlan as used */ - fnic_fcoe_process_vlan_resp(fnic, skb); - ret = 0; - } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { - /* received CVL request, restart vlan disc */ - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - /* pass it on to fcoe */ - ret = 1; + kfree(cur_frame->fp); + mempool_free(cur_frame, fnic->frame_elem_pool); } -drop: - return ret; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } void fnic_handle_fip_frame(struct work_struct *work) { + struct fnic_frame_list *cur_frame, *next; struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - unsigned long flags; - struct sk_buff *skb; - struct ethhdr *eh; - while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { - spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Processing FIP frame\n"); + + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue, + links) { if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb(skb); + list_del(&cur_frame->links); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + kfree(cur_frame->fp); + kfree(cur_frame); return; } + /* * If we're in a transitional state, just re-queue and return. * The queue will be serviced when we get to a stable state. */ if (fnic->state != FNIC_IN_FC_MODE && - fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_head(&fnic->fip_frame_queue, skb); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); return; } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - eh = (struct ethhdr *)skb->data; - if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { - dev_kfree_skb(skb); - continue; - } - /* - * If there's FLOGI rejects - clear all - * fcf's & restart from scratch - */ - if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { - atomic64_inc( - &fnic_stats->vlan_stats.flogi_rejects); - shost_printk(KERN_INFO, fnic->lport->host, - "Trigger a Link down - VLAN Disc\n"); - fcoe_ctlr_link_down(&fnic->ctlr); - /* start FCoE VLAN discovery */ - fnic_fcoe_send_vlan_req(fnic); - dev_kfree_skb(skb); - continue; - } - fcoe_ctlr_recv(&fnic->ctlr, skb); - continue; + + list_del(&cur_frame->links); + + if (fdls_fip_recv_frame(fnic, cur_frame->fp)) { + kfree(cur_frame->fp); + kfree(cur_frame); } } + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. - * @skb: Ethernet Frame. + * @fp: Ethernet Frame. */ -static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) +static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp) { - struct fc_frame *fp; struct ethhdr *eh; - struct fcoe_hdr *fcoe_hdr; - struct fcoe_crc_eof *ft; + struct fnic_frame_list *fip_fr_elem; + unsigned long flags; - /* - * Undo VLAN encapsulation if present. 
- */ - eh = (struct ethhdr *)skb->data; - if (eh->h_proto == htons(ETH_P_8021Q)) { - memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); - eh = skb_pull(skb, VLAN_HLEN); - skb_reset_mac_header(skb); - } - if (eh->h_proto == htons(ETH_P_FIP)) { - if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { - printk(KERN_ERR "Dropped FIP frame, as firmware " - "uses non-FIP mode, Enable FIP " - "using UCSM\n"); - goto drop; - } - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - skb_queue_tail(&fnic->fip_frame_queue, skb); + eh = (struct ethhdr *) fp; + if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) { + fip_fr_elem = (struct fnic_frame_list *) + kzalloc(sizeof(struct fnic_frame_list), GFP_ATOMIC); + if (!fip_fr_elem) + return 0; + fip_fr_elem->fp = fp; + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); queue_work(fnic_fip_queue, &fnic->fip_frame_work); - return 1; /* let caller know packet was used */ - } - if (eh->h_proto != htons(ETH_P_FCOE)) - goto drop; - skb_set_network_header(skb, sizeof(*eh)); - skb_pull(skb, sizeof(*eh)); - - fcoe_hdr = (struct fcoe_hdr *)skb->data; - if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) - goto drop; - - fp = (struct fc_frame *)skb; - fc_frame_init(fp); - fr_sof(fp) = fcoe_hdr->fcoe_sof; - skb_pull(skb, sizeof(struct fcoe_hdr)); - skb_reset_transport_header(skb); - - ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); - fr_eof(fp) = ft->fcoe_eof; - skb_trim(skb, skb->len - sizeof(*ft)); - return 0; -drop: - dev_kfree_skb_irq(skb); - return -1; + return 1; /* let caller know packet was used */ + } else + return 0; } /** @@ -725,205 +410,147 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) */ void fnic_update_mac_locked(struct fnic *fnic, u8 *new) { - u8 *ctl = fnic->ctlr.ctl_src_addr; + struct fnic_iport_s *iport = &fnic->iport; + u8 *ctl = iport->hwmac; u8 *data = fnic->data_src_addr; if (is_zero_ether_addr(new)) new = ctl; if (ether_addr_equal(data, new)) return; - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Update MAC: %u\n", *new); + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) vnic_dev_del_addr(fnic->vdev, data); + memcpy(data, new, ETH_ALEN); if (!ether_addr_equal(new, ctl)) vnic_dev_add_addr(fnic->vdev, new); } -/** - * fnic_update_mac() - set data MAC address and filters. - * @lport: local port. - * @new: newly-assigned FCoE MAC address. - */ -void fnic_update_mac(struct fc_lport *lport, u8 *new) -{ - struct fnic *fnic = lport_priv(lport); - - spin_lock_irq(&fnic->fnic_lock); - fnic_update_mac_locked(fnic, new); - spin_unlock_irq(&fnic->fnic_lock); -} - -/** - * fnic_set_port_id() - set the port_ID after successful FLOGI. - * @lport: local port. - * @port_id: assigned FC_ID. - * @fp: received frame containing the FLOGI accept or NULL. - * - * This is called from libfc when a new FC_ID has been assigned. - * This causes us to reset the firmware to FC_MODE and setup the new MAC - * address and FC_ID. - * - * It is also called with FC_ID 0 when we're logged off. - * - * If the FC_ID is due to point-to-point, fp may be NULL. 
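fnic_update_mac_locked() in the hunk above follows a small filter-swap recipe: an all-zero address means "revert to the hardware MAC"; nothing happens if the address is unchanged; the old filter is dropped only if it was not the hardware MAC; and the new address is programmed only if it differs from the hardware MAC, which is always present. A standalone sketch of that decision sequence, with stubs in place of vnic_dev_del_addr()/vnic_dev_add_addr():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool is_zero(const unsigned char *m) { static const unsigned char z[ETH_ALEN]; return !memcmp(m, z, ETH_ALEN); }
static bool eq(const unsigned char *a, const unsigned char *b) { return !memcmp(a, b, ETH_ALEN); }

/* Stand-ins for vnic_dev_del_addr()/vnic_dev_add_addr(). */
static void del_filter(const unsigned char *m) { printf("del %02x:..\n", m[0]); }
static void add_filter(const unsigned char *m) { printf("add %02x:..\n", m[0]); }

static void update_mac(unsigned char *data, const unsigned char *ctl, const unsigned char *new_mac)
{
    if (is_zero(new_mac))                 /* zero means: fall back to the hw MAC */
        new_mac = ctl;
    if (eq(data, new_mac))                /* unchanged: nothing to do */
        return;
    if (!is_zero(data) && !eq(data, ctl))
        del_filter(data);                 /* retire the previous learned filter */
    memcpy(data, new_mac, ETH_ALEN);
    if (!eq(new_mac, ctl))
        add_filter(new_mac);              /* hw MAC is always present; only extras are added */
}

int main(void)
{
    unsigned char data[ETH_ALEN] = {0}, ctl[ETH_ALEN] = {2, 0, 0, 0, 0, 1};
    unsigned char fpma[ETH_ALEN] = {0x0e, 0xfc, 0, 1, 2, 3};
    update_mac(data, ctl, fpma);   /* adds the FPMA filter */
    update_mac(data, ctl, fpma);   /* unchanged: no-op */
    return 0;
}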
- */ -void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) -{ - struct fnic *fnic = lport_priv(lport); - u8 *mac; - int ret; - - FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n", - port_id, fp); - - /* - * If we're clearing the FC_ID, change to use the ctl_src_addr. - * Set ethernet mode to send FLOGI. - */ - if (!port_id) { - fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); - fnic_set_eth_mode(fnic); - return; - } - - if (fp) { - mac = fr_cb(fp)->granted_mac; - if (is_zero_ether_addr(mac)) { - /* non-FIP - FLOGI already accepted - ignore return */ - fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); - } - fnic_update_mac(lport, mac); - } - - /* Change state to reflect transition to FC mode */ - spin_lock_irq(&fnic->fnic_lock); - if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) - fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; - else { - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "Unexpected fnic state %s while" - " processing flogi resp\n", - fnic_state_to_str(fnic->state)); - spin_unlock_irq(&fnic->fnic_lock); - return; - } - spin_unlock_irq(&fnic->fnic_lock); - - /* - * Send FLOGI registration to firmware to set up FC mode. - * The new address will be set up when registration completes. - */ - ret = fnic_flogi_reg_handler(fnic, port_id); - - if (ret < 0) { - spin_lock_irq(&fnic->fnic_lock); - if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) - fnic->state = FNIC_IN_ETH_MODE; - spin_unlock_irq(&fnic->fnic_lock); - } -} - static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped __attribute__((unused)), void *opaque) { struct fnic *fnic = vnic_dev_priv(rq->vdev); - struct sk_buff *skb; - struct fc_frame *fp; + uint8_t *fp; struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned int ethhdr_stripped; u8 type, color, eop, sop, ingress_port, vlan_stripped; - u8 fcoe = 0, fcoe_sof, fcoe_eof; - u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0; - u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; - u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc; + u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0; u8 fcs_ok = 1, packet_error = 0; - u16 q_number, completed_index, bytes_written = 0, vlan, checksum; + u16 q_number, completed_index, vlan; u32 rss_hash; + u16 checksum; + u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 fcoe = 0, fcoe_sof, fcoe_eof; u16 exchange_id, tmpl; u8 sof = 0; u8 eof = 0; u32 fcp_bytes_written = 0; + u16 enet_bytes_written = 0; + u32 bytes_written = 0; unsigned long flags; + struct fnic_frame_list *frame_elem = NULL; + struct ethhdr *eh; dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - skb = buf->os_buf; - fp = (struct fc_frame *)skb; + DMA_FROM_DEVICE); + fp = (uint8_t *) buf->os_buf; buf->os_buf = NULL; cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); if (type == CQ_DESC_TYPE_RQ_FCP) { - cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, - &tmpl, &fcp_bytes_written, &sof, &eof, - &ingress_port, &packet_error, - &fcoe_enc_error, &fcs_ok, &vlan_stripped, - &vlan); - skb_trim(skb, fcp_bytes_written); - fr_sof(fp) = sof; - fr_eof(fp) = eof; - + cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type, + &color, &q_number, &completed_index, &eop, &sop, + &fcoe_fnic_crc_ok, &exchange_id, &tmpl, + &fcp_bytes_written, &sof, &eof, &ingress_port, + &packet_error, &fcoe_enc_error, 
&fcs_ok, + &vlan_stripped, &vlan); + ethhdr_stripped = 1; + bytes_written = fcp_bytes_written; } else if (type == CQ_DESC_TYPE_RQ_ENET) { - cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &ingress_port, &fcoe, &eop, &sop, - &rss_type, &csum_not_calc, &rss_hash, - &bytes_written, &packet_error, - &vlan_stripped, &vlan, &checksum, - &fcoe_sof, &fcoe_fc_crc_ok, - &fcoe_enc_error, &fcoe_eof, - &tcp_udp_csum_ok, &udp, &tcp, - &ipv4_csum_ok, &ipv6, &ipv4, - &ipv4_fragment, &fcs_ok); - skb_trim(skb, bytes_written); + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type, + &color, &q_number, &completed_index, + &ingress_port, &fcoe, &eop, &sop, &rss_type, + &csum_not_calc, &rss_hash, &enet_bytes_written, + &packet_error, &vlan_stripped, &vlan, + &checksum, &fcoe_sof, &fcoe_fnic_crc_ok, + &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, + &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, + &ipv4_fragment, &fcs_ok); + + ethhdr_stripped = 0; + bytes_written = enet_bytes_written; + if (!fcs_ok) { atomic64_inc(&fnic_stats->misc_stats.frame_errors); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "fcs error. dropping packet.\n"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fcs error. Dropping packet.\n", fnic); goto drop; } - if (fnic_import_rq_eth_pkt(fnic, skb)) - return; + eh = (struct ethhdr *) fp; + if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) { + + if (fnic_import_rq_eth_pkt(fnic, fp)) + return; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping h_proto 0x%x", + be16_to_cpu(eh->h_proto)); + goto drop; + } } else { - /* wrong CQ type*/ - shost_printk(KERN_ERR, fnic->lport->host, - "fnic rq_cmpl wrong cq type x%x\n", type); + /* wrong CQ type */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic rq_cmpl wrong cq type x%x\n", type); goto drop; } - if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) { atomic64_inc(&fnic_stats->misc_stats.frame_errors); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "fnic rq_cmpl fcoe x%x fcsok x%x" - " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" - " x%x\n", - fcoe, fcs_ok, packet_error, - fcoe_fc_crc_ok, fcoe_enc_error); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n", + fcoe, fcs_ok, packet_error, + fcoe_fnic_crc_ok, fcoe_enc_error); goto drop; } spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->stop_rx_link_events) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic->stop_rx_link_events: %d\n", + fnic->stop_rx_link_events); goto drop; } - fr_dev(fp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, - (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); + + frame_elem = mempool_alloc(fnic->frame_elem_pool, + GFP_ATOMIC | __GFP_ZERO); + if (!frame_elem) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for frame elem"); + goto drop; } + frame_elem->fp = fp; + frame_elem->rx_ethhdr_stripped = ethhdr_stripped; + frame_elem->frame_len = bytes_written; - skb_queue_tail(&fnic->frame_queue, skb); - queue_work(fnic_event_queue, &fnic->frame_work); + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&frame_elem->links, &fnic->frame_queue); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + 
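At this point in the receive path the frame has been wrapped in a fnic_frame_list element from the frame_elem mempool and appended to fnic->frame_queue under the fnic lock; the queue_work() call that follows wakes the frame_work consumer. A userspace sketch of this producer side, with a mutex standing in for the irq-saving spinlock and calloc() for mempool_alloc():

#include <pthread.h>
#include <stdlib.h>

struct frame_elem {
    struct frame_elem *next;
    void *fp;
    int frame_len;
    int rx_ethhdr_stripped;
};

static struct frame_elem *queue_head, **queue_tail = &queue_head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void kick_worker(void) { /* stand-in for queue_work() */ }

/* Mirrors the tail of fnic_rq_cmpl_frame_recv(): wrap, enqueue, kick. */
static int enqueue_frame(void *fp, int len, int ethhdr_stripped)
{
    struct frame_elem *e = calloc(1, sizeof(*e)); /* mempool_alloc stand-in */
    if (!e)
        return -1;                 /* allocation failure: caller drops the frame */
    e->fp = fp;
    e->frame_len = len;
    e->rx_ethhdr_stripped = ethhdr_stripped;
    pthread_mutex_lock(&lock);
    *queue_tail = e;               /* append under the lock */
    queue_tail = &e->next;
    pthread_mutex_unlock(&lock);
    kick_worker();
    return 0;
}

int main(void)
{
    return enqueue_frame(/*fp=*/NULL, /*len=*/0, /*ethhdr_stripped=*/1);
}

The consumer side (fnic_handle_frame(), earlier in this patch) later reads rx_ethhdr_stripped to decide whether the FC header starts at offset 0 or after the Ethernet and FCoE headers.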
queue_work(fnic_event_queue, &fnic->frame_work); return; + drop: - dev_kfree_skb_irq(skb); + kfree(fp); } static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, @@ -949,10 +576,10 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, fnic_rq_cmpl_handler_cont, NULL); - if (cur_work_done) { + if (cur_work_done && fnic->stop_rx_link_events != 1) { err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "fnic_alloc_rq_frame can't alloc" " frame\n"); } @@ -970,223 +597,186 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) int fnic_alloc_rq_frame(struct vnic_rq *rq) { struct fnic *fnic = vnic_dev_priv(rq->vdev); - struct sk_buff *skb; + void *buf; u16 len; dma_addr_t pa; - int r; + int ret; - len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; - skb = dev_alloc_skb(len); - if (!skb) { - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "Unable to allocate RQ sk_buff\n"); + len = FNIC_FRAME_HT_ROOM; + buf = kmalloc(len, GFP_ATOMIC); + if (!buf) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unable to allocate RQ buffer of size: %d\n", len); return -ENOMEM; } - skb_reset_mac_header(skb); - skb_reset_transport_header(skb); - skb_reset_network_header(skb); - skb_put(skb, len); - pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); + + pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE); if (dma_mapping_error(&fnic->pdev->dev, pa)) { - r = -ENOMEM; - printk(KERN_ERR "PCI mapping failed with error %d\n", r); - goto free_skb; + ret = -ENOMEM; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PCI mapping failed with error %d\n", ret); + goto free_buf; } - fnic_queue_rq_desc(rq, skb, pa, len); + fnic_queue_rq_desc(rq, buf, pa, len); return 0; - -free_skb: - kfree_skb(skb); - return r; +free_buf: + kfree(buf); + return ret; } void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) { - struct fc_frame *fp = buf->os_buf; + void *rq_buf = buf->os_buf; struct fnic *fnic = vnic_dev_priv(rq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_FROM_DEVICE); - dev_kfree_skb(fp_skb(fp)); + kfree(rq_buf); buf->os_buf = NULL; } -/** - * fnic_eth_send() - Send Ethernet frame. - * @fip: fcoe_ctlr instance. - * @skb: Ethernet Frame, FIP, without VLAN encapsulation. 
- */ -void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - struct fnic *fnic = fnic_from_ctlr(fip); - struct vnic_wq *wq = &fnic->wq[0]; - dma_addr_t pa; - struct ethhdr *eth_hdr; - struct vlan_ethhdr *vlan_hdr; - unsigned long flags; - - if (!fnic->vlan_hw_insert) { - eth_hdr = (struct ethhdr *)skb_mac_header(skb); - vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr)); - memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); - vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); - vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; - vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - } else { - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - } - - pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(&fnic->pdev->dev, pa)) { - printk(KERN_ERR "DMA mapping failed\n"); - goto free_skb; - } - - spin_lock_irqsave(&fnic->wq_lock[0], flags); - if (!vnic_wq_desc_avail(wq)) - goto irq_restore; - - fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, - 0 /* hw inserts cos value */, - fnic->vlan_id, 1); - spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - return; - -irq_restore: - spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE); -free_skb: - kfree_skb(skb); -} - /* * Send FC frame. */ -static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) +static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len) { struct vnic_wq *wq = &fnic->wq[0]; - struct sk_buff *skb; dma_addr_t pa; - struct ethhdr *eth_hdr; - struct vlan_ethhdr *vlan_hdr; - struct fcoe_hdr *fcoe_hdr; - struct fc_frame_header *fh; - u32 tot_len, eth_hdr_len; int ret = 0; unsigned long flags; - fh = fc_frame_header_get(fp); - skb = fp_skb(fp); - - if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && - fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) - return 0; - - if (!fnic->vlan_hw_insert) { - eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); - vlan_hdr = skb_push(skb, eth_hdr_len); - eth_hdr = (struct ethhdr *)vlan_hdr; - vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); - vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); - vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); - fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); - } else { - eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); - eth_hdr = skb_push(skb, eth_hdr_len); - eth_hdr->h_proto = htons(ETH_P_FCOE); - fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); - } - - if (fnic->ctlr.map_dest) - fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); - else - memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); - memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); - - tot_len = skb->len; - BUG_ON(tot_len % 4); - - memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); - fcoe_hdr->fcoe_sof = fr_sof(fp); - if (FC_FCOE_VER) - FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); - - pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); - if (dma_mapping_error(&fnic->pdev->dev, pa)) { - ret = -ENOMEM; - printk(KERN_ERR "DMA map failed with error %d\n", ret); - goto free_skb_on_err; - } + pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE); + if (dma_mapping_error(&fnic->pdev->dev, pa)) + return -ENOMEM; - if 
((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, - (char *)eth_hdr, tot_len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); + if ((fnic_fc_trace_set_data(fnic->fnic_num, + FNIC_FC_SEND | 0x80, (char *) frame, + frame_len)) != 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic ctlr frame trace error"); } spin_lock_irqsave(&fnic->wq_lock[0], flags); if (!vnic_wq_desc_avail(wq)) { - dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE); + dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "vnic work queue descriptor is not available"); ret = -1; - goto irq_restore; + goto fnic_send_frame_end; } - fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), - 0 /* hw inserts cos value */, - fnic->vlan_id, 1, 1, 1); + /* hw inserts cos value */ + fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T, + 0, fnic->vlan_id, 1, 1, 1); -irq_restore: +fnic_send_frame_end: spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - -free_skb_on_err: - if (ret) - dev_kfree_skb_any(fp_skb(fp)); - return ret; } -/* - * fnic_send - * Routine to send a raw frame +/** + * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and FCoE + * info. This interface is used only in the non fast path. (login, fabric + * registrations etc.) + * + * @fnic: fnic instance + * @frame: frame structure with FC payload filled in + * @frame_size: length of the frame to be sent + * @srcmac: source mac address + * @dstmac: destination mac address + * + * Called with the fnic lock held. */ -int fnic_send(struct fc_lport *lp, struct fc_frame *fp) +static int +fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size, + uint8_t *srcmac, uint8_t *dstmac) { - struct fnic *fnic = lport_priv(lp); - unsigned long flags; + struct ethhdr *pethhdr; + struct fcoe_hdr *pfcoe_hdr; + struct fnic_frame_list *frame_elem; + int len = frame_size; + int ret; + struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame + + FNIC_ETH_FCOE_HDRS_OFFSET); - if (fnic->in_remove) { - dev_kfree_skb(fp_skb(fp)); - return -1; - } + pethhdr = (struct ethhdr *) frame; + pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE); + memcpy(pethhdr->h_source, srcmac, ETH_ALEN); + memcpy(pethhdr->h_dest, dstmac, ETH_ALEN); + + pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr)); + pfcoe_hdr->fcoe_sof = FC_SOF_I3; /* * Queue frame if in a transitional state. * This occurs while registering the Port_ID / MAC address after FLOGI. 
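fdls_send_fcoe_frame() above writes the Ethernet header at offset 0, the FCoE header directly behind it, and expects the caller's FC header at FNIC_ETH_FCOE_HDRS_OFFSET, i.e. immediately after those two. A small compilable sketch of the layout arithmetic; the struct definitions here are simplified stand-ins for the kernel's struct ethhdr and struct fcoe_hdr, but use the standard 14-byte sizes:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct eth_hdr  { uint8_t dst[6], src[6]; uint16_t proto; } __attribute__((packed));       /* 14 bytes */
struct fcoe_hdr { uint8_t ver; uint8_t resvd[12]; uint8_t sof; } __attribute__((packed));   /* 14 bytes */

#define ETH_FCOE_HDRS_OFFSET (sizeof(struct eth_hdr) + sizeof(struct fcoe_hdr))

int main(void)
{
    uint8_t frame[2048] = {0};
    struct eth_hdr *eh = (struct eth_hdr *)frame;
    struct fcoe_hdr *fh = (struct fcoe_hdr *)(frame + sizeof(*eh));

    eh->proto = htons(0x8906);  /* ETH_P_FCOE */
    fh->sof = 0x2e;             /* FC_SOF_I3: start-of-frame, class 3 */

    /* The FC frame header begins here; 28 with these definitions. */
    printf("FC header offset: %zu\n", ETH_FCOE_HDRS_OFFSET);
    return 0;
}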
*/ - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic->state != FNIC_IN_FC_MODE) + && (fnic->state != FNIC_IN_ETH_MODE)) { + frame_elem = mempool_alloc(fnic->frame_elem_pool, + GFP_ATOMIC | __GFP_ZERO); + if (!frame_elem) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for frame elem"); + return -ENOMEM; + } + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n", + ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id), + fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); + frame_elem->fp = frame; + frame_elem->frame_len = len; + list_add_tail(&frame_elem->links, &fnic->tx_queue); return 0; } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return fnic_send_frame(fnic, fp); + fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing"); + + ret = fnic_send_frame(fnic, frame, len); + return ret; +} + +void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame, + int frame_size) +{ + struct fnic *fnic = iport->fnic; + uint8_t *dstmac, *srcmac; + + /* If module unload is in-progress, don't send */ + if (fnic->in_remove) + return; + + if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) { + srcmac = iport->fpma; + dstmac = iport->fcfmac; + } else { + srcmac = iport->hwmac; + dstmac = FCOE_ALL_FCF_MAC; + } + + fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac); +} + +int +fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame, + int frame_size) +{ + struct fnic *fnic = iport->fnic; + + if (fnic->in_remove) + return -1; + + fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing"); + return fnic_send_frame(fnic, frame, frame_size); } /** * fnic_flush_tx() - send queued frames. - * @fnic: fnic device + * @work: pointer to work element * * Send frames that were waiting to go out in FC or Ethernet mode. * Whenever changing modes we purge queued frames, so these frames should @@ -1194,66 +784,90 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp) * * Called without fnic_lock held. */ -void fnic_flush_tx(struct fnic *fnic) +void fnic_flush_tx(struct work_struct *work) { - struct sk_buff *skb; + struct fnic *fnic = container_of(work, struct fnic, flush_work); struct fc_frame *fp; + struct fnic_frame_list *cur_frame, *next; - while ((skb = skb_dequeue(&fnic->tx_queue))) { - fp = (struct fc_frame *)skb; - fnic_send_frame(fnic, fp); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flush queued frames"); + + list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) { + fp = cur_frame->fp; + list_del(&cur_frame->links); + fnic_send_frame(fnic, fp, cur_frame->frame_len); + mempool_free(cur_frame, fnic->frame_elem_pool); } } -/** - * fnic_set_eth_mode() - put fnic into ethernet mode. - * @fnic: fnic device - * - * Called without fnic lock held. 
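fnic_send_fcoe_frame() earlier in this hunk picks source and destination addresses based on whether the fabric-provided MAC (FPMA) has been learnt: until FLOGI completes, frames leave from the hardware MAC addressed to the well-known FLOGI/all-FCF MAC; afterwards the FPMA/FCF pair is used. The FPMA itself, as fnic_fdls_learn_fcoe_macs() showed, is the FC-MAP prefix 0E:FC:00 followed by the 3-byte FCID. A compilable sketch of both steps (the all-FCF value mirrors FC_FCOE_FLOGI_MAC; all names here are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t all_fcf_mac[6] = { 0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe }; /* FC_FCOE_FLOGI_MAC */

/* FPMA = FC-MAP (0E:FC:00) + 24-bit FCID, as in fnic_fdls_learn_fcoe_macs(). */
static void make_fpma(uint8_t fpma[6], const uint8_t fcid[3])
{
    const uint8_t fc_map[3] = { 0x0e, 0xfc, 0x00 };
    memcpy(fpma, fc_map, 3);
    memcpy(fpma + 3, fcid, 3);
}

/* Address selection as in fnic_send_fcoe_frame(). */
static void pick_macs(int fpma_learnt,
                      const uint8_t *hwmac, const uint8_t *fpma, const uint8_t *fcfmac,
                      const uint8_t **src, const uint8_t **dst)
{
    if (fpma_learnt) {
        *src = fpma;
        *dst = fcfmac;
    } else {
        *src = hwmac;        /* pre-FLOGI: burnt-in MAC ...          */
        *dst = all_fcf_mac;  /* ... addressed to the FLOGI multicast */
    }
}

int main(void)
{
    uint8_t fcid[3] = { 0x01, 0x02, 0x03 }, fpma[6];
    const uint8_t hw[6] = {0}, fcf[6] = {0};
    const uint8_t *s, *d;

    make_fpma(fpma, fcid);
    printf("fpma: %02x:%02x:%02x:%02x:%02x:%02x\n",
           fpma[0], fpma[1], fpma[2], fpma[3], fpma[4], fpma[5]);
    pick_macs(0, hw, fpma, fcf, &s, &d);  /* pre-FLOGI: s == hw, d == all_fcf_mac */
    return 0;
}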
- */ -static void fnic_set_eth_mode(struct fnic *fnic) +int +fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id, + void *fp) { - unsigned long flags; - enum fnic_state old_state; + struct fnic *fnic = iport->fnic; + struct ethhdr *ethhdr; int ret; - spin_lock_irqsave(&fnic->fnic_lock, flags); -again: - old_state = fnic->state; - switch (old_state) { - case FNIC_IN_FC_MODE: - case FNIC_IN_ETH_TRANS_FC_MODE: - default: - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id, + fp, fnic->state); - ret = fnic_fw_reset_handler(fnic); + if (fp) { + ethhdr = (struct ethhdr *) fp; + vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest); + } - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) - goto again; - if (ret) - fnic->state = old_state; - break; - - case FNIC_IN_FC_TRANS_ETH_MODE: - case FNIC_IN_ETH_MODE: - break; + /* Change state to reflect transition to FC mode */ + if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) + fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unexpected fnic state while processing FLOGI response\n"); + return -1; + } + + /* + * Send FLOGI registration to firmware to set up FC mode. + * The new address will be set up when registration completes. + */ + ret = fnic_flogi_reg_handler(fnic, port_id); + if (ret < 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI registration error ret: %d fnic state: %d\n", + ret, fnic->state); + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) + fnic->state = FNIC_IN_ETH_MODE; + + return -1; + } + iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI registration success\n"); + return 0; +} + +void fnic_free_txq(struct list_head *head) +{ + struct fnic_frame_list *cur_frame, *next; + + list_for_each_entry_safe(cur_frame, next, head, links) { + list_del(&cur_frame->links); + kfree(cur_frame->fp); + kfree(cur_frame); } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); } static void fnic_wq_complete_frame_send(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) { - struct sk_buff *skb = buf->os_buf; - struct fc_frame *fp = (struct fc_frame *)skb; struct fnic *fnic = vnic_dev_priv(wq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(fp_skb(fp)); + mempool_free(buf->os_buf, fnic->frame_pool); buf->os_buf = NULL; } @@ -1291,119 +905,218 @@ int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) { - struct fc_frame *fp = buf->os_buf; struct fnic *fnic = vnic_dev_priv(wq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_TO_DEVICE); - dev_kfree_skb(fp_skb(fp)); + kfree(buf->os_buf); buf->os_buf = NULL; } -void fnic_fcoe_reset_vlans(struct fnic *fnic) +void +fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport, + unsigned long flags) +{ + struct fnic *fnic = iport->fnic; + struct fc_rport *rport; + struct fc_rport_identifiers ids; + struct rport_dd_data_s *rdd_data; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Adding rport fcid: 0x%x", tport->fcid); + + ids.node_name = tport->wwnn; + ids.port_name = tport->wwpn; + ids.port_id = tport->fcid; + ids.roles = FC_RPORT_ROLE_FCP_TARGET; + + 
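fc_remote_port_add() can sleep, so the driver releases the fnic spinlock around the call and re-takes it afterwards, as the lines that follow show. A hedged userspace sketch of that unlock/call/relock discipline, with a pthread mutex standing in for the irq-saving spinlock and a stub for fc_remote_port_add():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fnic_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for fc_remote_port_add(): may block, so must not run under the lock. */
static int remote_port_add(int fcid) { printf("add rport 0x%x\n", fcid); return 0; }

static void add_tport(int fcid)
{
    pthread_mutex_lock(&fnic_lock);
    /* ... build the rport identifiers under the lock ... */
    pthread_mutex_unlock(&fnic_lock);   /* drop: the callee may sleep */
    int rc = remote_port_add(fcid);
    pthread_mutex_lock(&fnic_lock);     /* re-take before touching driver state */
    if (rc)
        printf("failed to add rport 0x%x\n", fcid);
    /* ... publish tport->rport and flags under the lock ... */
    pthread_mutex_unlock(&fnic_lock);
}

int main(void)
{
    add_tport(0x010203);
    return 0;
}

The usual caveat applies: anything read before the drop must be revalidated after re-acquiring the lock, since other contexts may have run in between.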
spin_unlock_irqrestore(&fnic->fnic_lock, flags); + rport = fc_remote_port_add(fnic->host, 0, &ids); + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (!rport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to add rport for tport: 0x%x", tport->fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Added rport fcid: 0x%x", tport->fcid); + + /* Mimic these assignments in queuecommand to avoid timing issues */ + rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; + rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; + rdd_data = rport->dd_data; + rdd_data->tport = tport; + rdd_data->iport = iport; + tport->rport = rport; + tport->flags |= FNIC_FDLS_SCSI_REGISTERED; +} + +void +fnic_fdls_remove_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, unsigned long flags) +{ + struct fnic *fnic = iport->fnic; + struct rport_dd_data_s *rdd_data; + + struct fc_rport *rport; + + if (!tport) + return; + + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE); + rport = tport->rport; + + if (rport) { + /* tport resource release will be done + * after fnic_terminate_rport_io() + */ + tport->flags |= FNIC_FDLS_TPORT_DELETED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + /* Interface to scsi_fc_transport */ + fc_remote_port_delete(rport); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Deregistered and freed tport fcid: 0x%x from scsi transport fc", + tport->fcid); + + /* + * the dd_data is allocated by fc transport + * of size dd_fcrport_size + */ + rdd_data = rport->dd_data; + rdd_data->tport = NULL; + rdd_data->iport = NULL; + list_del(&tport->links); + kfree(tport); + } else { + fnic_del_tport_timer_sync(fnic, tport); + list_del(&tport->links); + kfree(tport); + } +} + +void fnic_delete_fcp_tports(struct fnic *fnic) { + struct fnic_tport_s *tport, *next; unsigned long flags; - struct fcoe_vlan *vlan; - struct fcoe_vlan *next; - /* - * indicate a link down to fcoe so that all fcf's are free'd - * might not be required since we did this before sending vlan - * discovery request - */ - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (!list_empty(&fnic->vlans)) { - list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { - list_del(&vlan->list); - kfree(vlan); - } + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "removing fcp rport fcid: 0x%x", tport->fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); + fnic_del_tport_timer_sync(fnic, tport); + fnic_fdls_remove_tport(&fnic->iport, tport, flags); } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); } -void fnic_handle_fip_timer(struct fnic *fnic) +/** + * fnic_tport_event_handler() - Handler for remote port events + * in the tport_event_queue. 
+ * + * @work: Handle to the remote port being dequeued + */ +void fnic_tport_event_handler(struct work_struct *work) { + struct fnic *fnic = container_of(work, struct fnic, tport_work); + struct fnic_tport_event_s *cur_evt, *next; unsigned long flags; - struct fcoe_vlan *vlan; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - u64 sol_time; + struct fnic_tport_s *tport; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; + list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { + tport = cur_evt->arg1; + switch (cur_evt->event) { + case TGT_EV_RPORT_ADD: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Add rport event"); + if (tport->state == FDLS_TGT_STATE_READY) { + fnic_fdls_add_tport(&fnic->iport, + (struct fnic_tport_s *) cur_evt->arg1, flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Target not ready. Add rport event dropped: 0x%x", + tport->fcid); + } + break; + case TGT_EV_RPORT_DEL: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remove rport event"); + if (tport->state == FDLS_TGT_STATE_OFFLINING) { + fnic_fdls_remove_tport(&fnic->iport, + (struct fnic_tport_s *) cur_evt->arg1, flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "remove rport event dropped tport fcid: 0x%x", + tport->fcid); + } + break; + case TGT_EV_TPORT_DELETE: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Delete tport event"); + fdls_delete_tport(tport->iport, tport); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unknown tport event"); + break; + } + list_del(&cur_evt->links); + kfree(cur_evt); } spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} - if (fnic->ctlr.mode == FIP_MODE_NON_FIP) - return; +void fnic_flush_tport_event_list(struct fnic *fnic) +{ + struct fnic_tport_event_s *cur_evt, *next; + unsigned long flags; - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (list_empty(&fnic->vlans)) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - /* no vlans available, try again */ - if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) - if (printk_ratelimit()) - shost_printk(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - return; + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { + list_del(&cur_evt->links); + kfree(cur_evt); } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "fip_timer: vlan %d state %d sol_count %d\n", - vlan->vid, vlan->state, vlan->sol_count); - switch (vlan->state) { - case FIP_VLAN_USED: - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "FIP VLAN is selected for FC transaction\n"); - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - break; - case FIP_VLAN_FAILED: - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - /* if all vlans are in failed state, restart vlan disc */ - if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) - if (printk_ratelimit()) - shost_printk(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - break; - case FIP_VLAN_SENT: - if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { - /* - * no response on this vlan, remove from the list. 
- * Try the next vlan - */ - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, - "Dequeue this VLAN ID %d from list\n", - vlan->vid); - list_del(&vlan->list); - kfree(vlan); - vlan = NULL; - if (list_empty(&fnic->vlans)) { - /* we exhausted all vlans, restart vlan disc */ - spin_unlock_irqrestore(&fnic->vlans_lock, - flags); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, - "fip_timer: vlan list empty, " - "trigger vlan disc\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - return; - } - /* check the next vlan */ - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, - list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); - vlan->sol_count++; - sol_time = jiffies + msecs_to_jiffies - (FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); - break; +void fnic_reset_work_handler(struct work_struct *work) +{ + struct fnic *cur_fnic, *next_fnic; + unsigned long reset_fnic_list_lock_flags; + int host_reset_ret_code; + + /* + * This is a single thread. It is per fnic module, not per fnic + * All the fnics that need to be reset + * have been serialized via the reset fnic list. + */ + spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags); + list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) { + list_del(&cur_fnic->links); + spin_unlock_irqrestore(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + + dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n", + cur_fnic->fnic_num); + host_reset_ret_code = fnic_host_reset(cur_fnic->host); + dev_err(&cur_fnic->pdev->dev, + "fnic: <%d>: returned from host reset with status: %d\n", + cur_fnic->fnic_num, host_reset_ret_code); + + spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags); + cur_fnic->pc_rscn_handling_status = + PC_RSCN_HANDLING_NOT_IN_PROGRESS; + spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags); + + spin_lock_irqsave(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); } + spin_unlock_irqrestore(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); } diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h new file mode 100644 index 0000000000000..531d0b37e450f --- /dev/null +++ b/drivers/scsi/fnic/fnic_fdls.h @@ -0,0 +1,435 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _FNIC_FDLS_H_ +#define _FNIC_FDLS_H_ + +#include "fnic_stats.h" +#include "fdls_fc.h" + +/* FDLS - Fabric discovery and login services + * -> VLAN discovery + * -> retry every retry delay seconds until it succeeds. 
+ * <- List of VLANs + * + * -> Solicitation + * <- Solicitation response (Advertisement) + * + * -> FCF selection & FLOGI ( FLOGI timeout - 2 * E_D_TOV) + * <- FLOGI response + * + * -> FCF keep alive + * <- FCF keep alive + * + * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- PLOGI response + * -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg + * + * -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- SCR response + * -> Retry SCR - Number of retries 2 + * + * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV) + * -> Retry on BUSY until it succeeds + * -> 2 retries on timeout + * + * -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2 ) + * -> Ignore if both retries fail. + * + * Session establishment with targets + * For each PWWN + * -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- PLOGI response + * -> Retry PLOGI. Num retries using vnic.cfg + * + * -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- PRLI response + * -> Retry PRLI. Num retries using vnic.cfg + * + */ + +#define FDLS_RETRY_COUNT 2 + +/* + * OXID encoding: + * bits 0-8: oxid idx - allocated from pool + * bits 9-13: oxid frame code from fnic_oxid_frame_type_e + * bits 14-15: all zeros + */ +#define FNIC_OXID_POOL_SZ (512) /* always power of 2 */ +#define FNIC_OXID_ENCODE(idx, frame_type) (frame_type | idx) +#define FNIC_FRAME_MASK 0xFE00 +#define FNIC_FRAME_TYPE(oxid) (oxid & FNIC_FRAME_MASK) +#define FNIC_OXID_IDX(oxid) ((oxid) & (FNIC_OXID_POOL_SZ - 1)) + +#define OXID_RECLAIM_TOV(iport) (2 * iport->r_a_tov) /* in milliseconds */ + +#define FNIC_FDLS_FABRIC_ABORT_ISSUED 0x1 +#define FNIC_FDLS_FPMA_LEARNT 0x2 + +/* tport flags */ +#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST 0x1 +#define FNIC_FDLS_TGT_ABORT_ISSUED 0x2 +#define FNIC_FDLS_TPORT_SEND_ADISC 0x4 +#define FNIC_FDLS_RETRY_FRAME 0x8 +#define FNIC_FDLS_TPORT_BUSY 0x10 +#define FNIC_FDLS_TPORT_TERMINATING 0x20 +#define FNIC_FDLS_TPORT_DELETED 0x40 +#define FNIC_FDLS_SCSI_REGISTERED 0x200 + +/* Retry supported by rport (returned by PRLI service parameters) */ +#define FDLS_FC_RP_FLAGS_RETRY 0x1 + +#define fdls_set_state(_fdls_fabric, _state) ((_fdls_fabric)->state = _state) +#define fdls_get_state(_fdls_fabric) ((_fdls_fabric)->state) + +#define FNIC_FDMI_ACTIVE 0x8 +#define FNIC_FIRST_LINK_UP 0x2 + +#define fdls_set_tport_state(_tport, _state) (_tport->state = _state) +#define fdls_get_tport_state(_tport) (_tport->state) + +#define FNIC_PORTSPEED_10GBIT 1 +#define FNIC_FRAME_HT_ROOM (2148) +#define FNIC_FCOE_FRAME_MAXSZ (2112) + + +#define FNIC_FRAME_TYPE_FABRIC_FLOGI 0x1000 +#define FNIC_FRAME_TYPE_FABRIC_PLOGI 0x1200 +#define FNIC_FRAME_TYPE_FABRIC_RPN 0x1400 +#define FNIC_FRAME_TYPE_FABRIC_RFT 0x1600 +#define FNIC_FRAME_TYPE_FABRIC_RFF 0x1800 +#define FNIC_FRAME_TYPE_FABRIC_SCR 0x1A00 +#define FNIC_FRAME_TYPE_FABRIC_GPN_FT 0x1C00 +#define FNIC_FRAME_TYPE_FABRIC_LOGO 0x1E00 +#define FNIC_FRAME_TYPE_FDMI_PLOGI 0x2000 +#define FNIC_FRAME_TYPE_FDMI_RHBA 0x2200 +#define FNIC_FRAME_TYPE_FDMI_RPA 0x2400 +#define FNIC_FRAME_TYPE_TGT_PLOGI 0x2600 +#define FNIC_FRAME_TYPE_TGT_PRLI 0x2800 +#define FNIC_FRAME_TYPE_TGT_ADISC 0x2A00
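/*
 * Minimal sketch of how the OXID macros above compose (illustrative
 * only; this helper is not part of the driver). An allocated pool
 * index is tagged with a frame-type code, and both fields can be
 * recovered from the OXID seen on the wire.
 */
#if 0
static inline void fdls_oxid_encode_example(void)
{
	uint16_t oxid = FNIC_OXID_ENCODE(0x01F, FNIC_FRAME_TYPE_FABRIC_FLOGI);

	/* low 9 bits select the pool slot: FNIC_OXID_IDX(oxid) == 0x01F */
	/* high bits carry the frame code: FNIC_FRAME_TYPE(oxid) == 0x1000 */
	(void) oxid;
}
#endif

+#define 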
FNIC_FRAME_TYPE_TGT_LOGO 0x2C00 + +struct fnic_fip_fcf_s { + uint16_t vlan_id; + uint8_t fcf_mac[6]; + uint8_t fcf_priority; + uint32_t fka_adv_period; + uint8_t ka_disabled; +}; + +enum fnic_fdls_state_e { + FDLS_STATE_INIT = 0, + FDLS_STATE_LINKDOWN, + FDLS_STATE_FABRIC_LOGO, + FDLS_STATE_FLOGO_DONE, + FDLS_STATE_FABRIC_FLOGI, + FDLS_STATE_FABRIC_PLOGI, + FDLS_STATE_RPN_ID, + FDLS_STATE_REGISTER_FC4_TYPES, + FDLS_STATE_REGISTER_FC4_FEATURES, + FDLS_STATE_SCR, + FDLS_STATE_GPN_FT, + FDLS_STATE_TGT_DISCOVERY, + FDLS_STATE_RSCN_GPN_FT, + FDLS_STATE_SEND_GPNFT +}; + +struct fnic_fdls_fabric_s { + enum fnic_fdls_state_e state; + uint32_t flags; + struct list_head tport_list; /* List of discovered tports */ + struct timer_list retry_timer; + int del_timer_inprogress; + int del_fdmi_timer_inprogress; + int retry_counter; + int timer_pending; + int fdmi_retry; + struct timer_list fdmi_timer; + int fdmi_pending; +}; + +struct fnic_fdls_fip_s { + uint32_t state; + uint32_t flogi_retry; +}; + +/* Message to tport_event_handler */ +enum fnic_tgt_msg_id { + TGT_EV_NONE = 0, + TGT_EV_RPORT_ADD, + TGT_EV_RPORT_DEL, + TGT_EV_TPORT_DELETE, + TGT_EV_REMOVE +}; + +struct fnic_tport_event_s { + struct list_head links; + enum fnic_tgt_msg_id event; + void *arg1; +}; + +enum fdls_tgt_state_e { + FDLS_TGT_STATE_INIT = 0, + FDLS_TGT_STATE_PLOGI, + FDLS_TGT_STATE_PRLI, + FDLS_TGT_STATE_READY, + FDLS_TGT_STATE_LOGO_RECEIVED, + FDLS_TGT_STATE_ADISC, + FDL_TGT_STATE_PLOGO, + FDLS_TGT_STATE_OFFLINING, + FDLS_TGT_STATE_OFFLINE +}; + +struct fnic_tport_s { + struct list_head links; /* To link the tports */ + enum fdls_tgt_state_e state; + uint32_t flags; + uint32_t fcid; + uint64_t wwpn; + uint64_t wwnn; + uint16_t active_oxid; + uint16_t tgt_flags; + atomic_t in_flight; /* io counter */ + uint16_t max_payload_size; + uint16_t r_a_tov; + uint16_t e_d_tov; + uint16_t lun0_delay; + int max_concur_seqs; + uint32_t fcp_csp; + struct timer_list retry_timer; + int del_timer_inprogress; + int retry_counter; + int timer_pending; + unsigned int num_pending_cmds; + int nexus_restart_count; + int exch_reset_in_progress; + void *iport; + struct work_struct tport_del_work; + struct completion *tport_del_done; + struct fc_rport *rport; + char str_wwpn[20]; + char str_wwnn[20]; +}; + +/* OXID pool related structures */ +struct reclaim_entry_s { + struct list_head links; + /* oxid that needs to be freed after 2*r_a_tov */ + uint16_t oxid_idx; + /* in jiffies. Used to compute the waiting time */ + unsigned long expires; + unsigned long *bitmap; +}; + +/* used for allocating oxids for fabric and fdmi requests */ +struct fnic_oxid_pool_s { + DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ); + int sz; /* size of the pool or block */ + int next_idx; /* used for cycling through the oxid pool */ + + /* retry schedule free */ + DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ); + struct delayed_work schedule_oxid_free_retry; + + /* List of oxids that need to be freed and reclaimed. 
+ * This list is shared by all the oxid pools + */ + struct list_head oxid_reclaim_list; + /* Work associated with reclaim list */ + struct delayed_work oxid_reclaim_work; +}; + +/* iport */ +enum fnic_iport_state_e { + FNIC_IPORT_STATE_INIT = 0, + FNIC_IPORT_STATE_LINK_WAIT, + FNIC_IPORT_STATE_FIP, + FNIC_IPORT_STATE_FABRIC_DISC, + FNIC_IPORT_STATE_READY +}; + +struct fnic_iport_s { + enum fnic_iport_state_e state; + struct fnic *fnic; + uint64_t boot_time; + uint32_t flags; + int usefip; + uint8_t hwmac[6]; /* HW MAC Addr */ + uint8_t fpma[6]; /* Fabric Provided MA */ + uint8_t fcfmac[6]; /* MAC addr of Fabric */ + uint16_t vlan_id; + uint32_t fcid; + + /* oxid pool */ + struct fnic_oxid_pool_s oxid_pool; + + /* + * fabric reqs are serialized and only one req at a time. + * Tracking the oxid for sending abort + */ + uint16_t active_oxid_fabric_req; + /* fdmi only */ + uint16_t active_oxid_fdmi_plogi; + uint16_t active_oxid_fdmi_rhba; + uint16_t active_oxid_fdmi_rpa; + + struct fnic_fip_fcf_s selected_fcf; + struct fnic_fdls_fip_s fip; + struct fnic_fdls_fabric_s fabric; + struct list_head tport_list; + struct list_head tport_list_pending_del; + /* list of tports for which we are yet to send PLOGO */ + struct list_head inprocess_tport_list; + struct list_head deleted_tport_list; + struct work_struct tport_event_work; + uint32_t e_d_tov; /* msec */ + uint32_t r_a_tov; /* msec */ + uint32_t link_supported_speeds; + uint32_t max_flogi_retries; + uint32_t max_plogi_retries; + uint32_t plogi_timeout; + uint32_t service_params; + uint64_t wwpn; + uint64_t wwnn; + uint16_t max_payload_size; + spinlock_t deleted_tport_lst_lock; + struct completion *flogi_reg_done; + struct fnic_iport_stats iport_stats; + char str_wwpn[20]; + char str_wwnn[20]; +}; + +struct rport_dd_data_s { + struct fnic_tport_s *tport; + struct fnic_iport_s *iport; +}; + +enum fnic_recv_frame_type_e { + FNIC_FABRIC_FLOGI_RSP = 1, + FNIC_FABRIC_PLOGI_RSP, + FNIC_FABRIC_RPN_RSP, + FNIC_FABRIC_RFT_RSP, + FNIC_FABRIC_RFF_RSP, + FNIC_FABRIC_SCR_RSP, + FNIC_FABRIC_GPN_FT_RSP, + FNIC_FABRIC_BLS_ABTS_RSP, + FNIC_FDMI_PLOGI_RSP, + FNIC_FDMI_REG_HBA_RSP, + FNIC_FDMI_RPA_RSP, + FNIC_FDMI_BLS_ABTS_RSP, + FNIC_FABRIC_LOGO_RSP, + + /* responses to target requests */ + FNIC_TPORT_PLOGI_RSP, + FNIC_TPORT_PRLI_RSP, + FNIC_TPORT_ADISC_RSP, + FNIC_TPORT_BLS_ABTS_RSP, + FNIC_TPORT_LOGO_RSP, + + /* unsolicited requests */ + FNIC_BLS_ABTS_REQ, + FNIC_ELS_PLOGI_REQ, + FNIC_ELS_RSCN_REQ, + FNIC_ELS_LOGO_REQ, + FNIC_ELS_ECHO_REQ, + FNIC_ELS_ADISC, + FNIC_ELS_RLS, + FNIC_ELS_RRQ, + FNIC_ELS_UNSUPPORTED_REQ, +}; + +enum fnic_port_speeds { + DCEM_PORTSPEED_NONE = 0, + DCEM_PORTSPEED_1G = 1000, + DCEM_PORTSPEED_2G = 2000, + DCEM_PORTSPEED_4G = 4000, + DCEM_PORTSPEED_8G = 8000, + DCEM_PORTSPEED_10G = 10000, + DCEM_PORTSPEED_16G = 16000, + DCEM_PORTSPEED_20G = 20000, + DCEM_PORTSPEED_25G = 25000, + DCEM_PORTSPEED_32G = 32000, + DCEM_PORTSPEED_40G = 40000, + DCEM_PORTSPEED_4x10G = 41000, + DCEM_PORTSPEED_50G = 50000, + DCEM_PORTSPEED_64G = 64000, + DCEM_PORTSPEED_100G = 100000, + DCEM_PORTSPEED_128G = 128000, +}; + +/* Function Declarations */ +/* fdls_disc.c */ +void fnic_fdls_disc_init(struct fnic_iport_s *iport); +void fnic_fdls_disc_start(struct fnic_iport_s *iport); +void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame, + int len, int fchdr_offset); +void fnic_fdls_link_down(struct fnic_iport_s *iport); +int fdls_init_frame_pool(struct fnic_iport_s *iport); +uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport); +uint16_t 
fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type, + uint16_t *active_oxid); +void fdls_free_oxid(struct fnic_iport_s *iport, + uint16_t oxid, uint16_t *active_oxid); +void fdls_tgt_logout(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); +void fnic_del_fabric_timer_sync(struct fnic *fnic); +void fnic_del_tport_timer_sync(struct fnic *fnic, + struct fnic_tport_s *tport); +void fdls_send_fabric_logo(struct fnic_iport_s *iport); +int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr); +void fdls_send_tport_abts(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); +bool fdls_delete_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); +void fdls_fdmi_timer_callback(struct timer_list *t); +void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport); + +/* fnic_fcs.c */ +void fnic_fdls_init(struct fnic *fnic, int usefip); +void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame, + int frame_size); +void fnic_fcoe_send_vlan_req(struct fnic *fnic); +int fnic_send_fip_frame(struct fnic_iport_s *iport, + void *frame, int frame_size); +void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, + uint8_t *fcid); +void fnic_fdls_add_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, unsigned long flags); +void fnic_fdls_remove_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, + unsigned long flags); + +/* fip.c */ +void fnic_fcoe_send_vlan_req(struct fnic *fnic); +void fnic_common_fip_cleanup(struct fnic *fnic); +int fdls_fip_recv_frame(struct fnic *fnic, void *frame); +void fnic_handle_fcs_ka_timer(struct timer_list *t); +void fnic_handle_enode_ka_timer(struct timer_list *t); +void fnic_handle_vn_ka_timer(struct timer_list *t); +void fnic_handle_fip_timer(struct timer_list *t); +extern void fdls_fabric_timer_callback(struct timer_list *t); + +/* fnic_scsi.c */ +void fnic_scsi_fcpio_reset(struct fnic *fnic); +extern void fdls_fabric_timer_callback(struct timer_list *t); +void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid); +int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id, + void *fp); +struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport, + uint32_t fcid); +struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport, + uint64_t wwpn); + +#endif /* _FNIC_FDLS_H_ */ diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h deleted file mode 100644 index 7761f33ab5d4c..0000000000000 --- a/drivers/scsi/fnic/fnic_fip.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. - * Copyright 2007 Nuova Systems, Inc. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _FNIC_FIP_H_ -#define _FNIC_FIP_H_ - - -#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. 
to choose FCF */ -#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ -#define FCOE_CTLR_MAX_SOL 8 - -#define FINC_MAX_FLOGI_REJECTS 8 - -struct vlan { - __be16 vid; - __be16 type; -}; - -/* - * VLAN entry. - */ -struct fcoe_vlan { - struct list_head list; - u16 vid; /* vlan ID */ - u16 sol_count; /* no. of sols sent */ - u16 state; /* state */ -}; - -enum fip_vlan_state { - FIP_VLAN_AVAIL = 0, /* don't do anything */ - FIP_VLAN_SENT = 1, /* sent */ - FIP_VLAN_USED = 2, /* succeed */ - FIP_VLAN_FAILED = 3, /* failed to response */ -}; - -struct fip_vlan { - struct ethhdr eth; - struct fip_header fip; - struct { - struct fip_mac_desc mac; - struct fip_wwn_desc wwnn; - } desc; -}; - -#endif /* __FINC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index 1cb6a68c8e4ec..4c45ea9e0d4cb 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h @@ -19,6 +19,7 @@ #define _FNIC_IO_H_ #include +#include "fnic_fdls.h" #define FNIC_DFLT_SG_DESC_CNT 32 #define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */ @@ -53,6 +54,8 @@ enum fnic_ioreq_state { }; struct fnic_io_req { + struct fnic_iport_s *iport; + struct fnic_tport_s *tport; struct host_sg_desc *sgl_list; /* sgl list */ void *sgl_list_alloc; /* sgl list address used for free */ dma_addr_t sense_buf_pa; /* dma address for sense buffer*/ @@ -64,16 +67,7 @@ struct fnic_io_req { unsigned long start_time; /* in jiffies */ struct completion *abts_done; /* completion for abts */ struct completion *dr_done; /* completion for device reset */ -}; - -enum fnic_port_speeds { - DCEM_PORTSPEED_NONE = 0, - DCEM_PORTSPEED_1G = 1000, - DCEM_PORTSPEED_10G = 10000, - DCEM_PORTSPEED_20G = 20000, - DCEM_PORTSPEED_25G = 25000, - DCEM_PORTSPEED_40G = 40000, - DCEM_PORTSPEED_4x10G = 41000, - DCEM_PORTSPEED_100G = 100000, + unsigned int tag; + struct scsi_cmnd *sc; /* midlayer's cmd pointer */ }; #endif /* _FNIC_IO_H_ */ diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 2fb2731f50fbc..74e72f1965112 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include "vnic_dev.h" #include "vnic_intr.h" @@ -50,8 +50,13 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data) fnic_log_q_error(fnic); } + if (pba & (1 << FNIC_INTX_DUMMY)) { + atomic64_inc(&fnic->fnic_stats.misc_stats.intx_dummy); + vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_DUMMY]); + } + if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { - work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions); + work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX); work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); @@ -72,7 +77,7 @@ static irqreturn_t fnic_isr_msi(int irq, void *data) fnic->fnic_stats.misc_stats.last_isr_time = jiffies; atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); - work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions); + work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX); work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); @@ -121,12 +126,22 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) { struct fnic *fnic = data; unsigned long wq_copy_work_done = 0; + int i; fnic->fnic_stats.misc_stats.last_isr_time = jiffies; atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); - wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions); - 
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], + i = irq - fnic->msix[0].irq_num; + if (i >= fnic->wq_copy_count + fnic->copy_wq_base || + i < 0 || fnic->msix[i].irq_num != irq) { + for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base ; i++) { + if (fnic->msix[i].irq_num == irq) + break; + } + } + + wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions, i); + vnic_intr_return_credits(&fnic->intr[i], wq_copy_work_done, 1 /* unmask intr */, 1 /* reset intr timer */); @@ -140,7 +155,7 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) fnic->fnic_stats.misc_stats.last_isr_time = jiffies; atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); - vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); + vnic_intr_return_all_credits(&fnic->intr[fnic->err_intr_offset]); fnic_log_q_error(fnic); fnic_handle_link_event(fnic); @@ -198,26 +213,30 @@ int fnic_request_intr(struct fnic *fnic) fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq; fnic->msix[FNIC_MSIX_WQ].devid = fnic; - sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname, - "%.11s-scsi-wq", fnic->name); - fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy; - fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic; + for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base; i++) { + sprintf(fnic->msix[i].devname, + "%.11s-scsi-wq-%d", fnic->name, i-FNIC_MSIX_WQ_COPY); + fnic->msix[i].isr = fnic_isr_msix_wq_copy; + fnic->msix[i].devid = fnic; + } - sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname, + sprintf(fnic->msix[fnic->err_intr_offset].devname, "%.11s-err-notify", fnic->name); - fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr = + fnic->msix[fnic->err_intr_offset].isr = fnic_isr_msix_err_notify; - fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic; + fnic->msix[fnic->err_intr_offset].devid = fnic; - for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) { - err = request_irq(pci_irq_vector(fnic->pdev, i), - fnic->msix[i].isr, 0, - fnic->msix[i].devname, - fnic->msix[i].devid); + for (i = 0; i < fnic->intr_count; i++) { + fnic->msix[i].irq_num = pci_irq_vector(fnic->pdev, i); + + err = request_irq(fnic->msix[i].irq_num, + fnic->msix[i].isr, 0, + fnic->msix[i].devname, + fnic->msix[i].devid); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "MSIX: request_irq" - " failed %d\n", err); + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "request_irq failed with error: %d\n", + err); fnic_free_intr(fnic); break; } @@ -232,44 +251,99 @@ int fnic_request_intr(struct fnic *fnic) return err; } -int fnic_set_intr_mode(struct fnic *fnic) +int fnic_set_intr_mode_msix(struct fnic *fnic) { unsigned int n = ARRAY_SIZE(fnic->rq); unsigned int m = ARRAY_SIZE(fnic->wq); - unsigned int o = ARRAY_SIZE(fnic->wq_copy); + unsigned int o = ARRAY_SIZE(fnic->hw_copy_wq); + unsigned int min_irqs = n + m + 1 + 1; /*rq, raw wq, wq, err*/ /* - * Set interrupt mode (INTx, MSI, MSI-X) depending - * system capabilities. 
- * - * Try MSI-X first - * * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs * (last INTR is used for WQ/RQ errors and notification area) */ - if (fnic->rq_count >= n && - fnic->raw_wq_count >= m && - fnic->wq_copy_count >= o && - fnic->cq_count >= n + m + o) { - int vecs = n + m + o + 1; - - if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs, - PCI_IRQ_MSIX) == vecs) { + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "rq-array size: %d wq-array size: %d copy-wq array size: %d\n", + n, m, o); + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n", + fnic->rq_count, fnic->raw_wq_count, + fnic->wq_copy_count, fnic->cq_count); + + if (fnic->rq_count <= n && fnic->raw_wq_count <= m && + fnic->wq_copy_count <= o) { + int vec_count = 0; + int vecs = fnic->rq_count + fnic->raw_wq_count + fnic->wq_copy_count + 1; + + vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "allocated %d MSI-X vectors\n", + vec_count); + + if (vec_count > 0) { + if (vec_count < vecs) { + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "interrupts number mismatch: vec_count: %d vecs: %d\n", + vec_count, vecs); + if (vec_count < min_irqs) { + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "no interrupts for copy wq\n"); + return 1; + } + } + fnic->rq_count = n; fnic->raw_wq_count = m; - fnic->wq_copy_count = o; - fnic->wq_count = m + o; - fnic->cq_count = n + m + o; - fnic->intr_count = vecs; - fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY; - - FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, - "Using MSI-X Interrupts\n"); - vnic_dev_set_intr_mode(fnic->vdev, - VNIC_DEV_INTR_MODE_MSIX); + fnic->copy_wq_base = fnic->rq_count + fnic->raw_wq_count; + fnic->wq_copy_count = vec_count - n - m - 1; + fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count; + if (fnic->cq_count != vec_count - 1) { + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "CQ count: %d does not match MSI-X vector count: %d\n", + fnic->cq_count, vec_count); + fnic->cq_count = vec_count - 1; + } + fnic->intr_count = vec_count; + fnic->err_intr_offset = fnic->rq_count + fnic->wq_count; + + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "rq_count: %d raw_wq_count: %d copy_wq_base: %d\n", + fnic->rq_count, + fnic->raw_wq_count, fnic->copy_wq_base); + + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "wq_copy_count: %d wq_count: %d cq_count: %d\n", + fnic->wq_copy_count, + fnic->wq_count, fnic->cq_count); + + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "intr_count: %d err_intr_offset: %u", + fnic->intr_count, + fnic->err_intr_offset); + + vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX); + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic using MSI-X\n"); return 0; } } + return 1; +} + +int fnic_set_intr_mode(struct fnic *fnic) +{ + int ret_status = 0; + + /* + * Set interrupt mode (INTx, MSI, MSI-X) depending + * system capabilities. 
+ * + * Try MSI-X first + */ + ret_status = fnic_set_intr_mode_msix(fnic); + if (ret_status == 0) + return ret_status; /* * Next try MSI @@ -289,7 +363,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->intr_count = 1; fnic->err_intr_offset = 0; - FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Using MSI Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); @@ -315,7 +389,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->cq_count = 3; fnic->intr_count = 3; - FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Using Legacy Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index d09d15fff0656..dfeb49a5d89aa 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -24,23 +24,25 @@ #include #include #include +#include #include #include #include +#include #include #include #include #include #include -#include #include #include "vnic_dev.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" -#include "fnic_fip.h" #include "fnic.h" +#include "fnic_fdls.h" +#include "fdls_fc.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -49,8 +51,15 @@ static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; static struct kmem_cache *fnic_io_req_cache; +static struct kmem_cache *fdls_frame_cache; +static struct kmem_cache *fdls_frame_elem_cache; static LIST_HEAD(fnic_list); static DEFINE_SPINLOCK(fnic_list_lock); +static DEFINE_IDA(fnic_ida); + +struct work_struct reset_fnic_work; +LIST_HEAD(reset_fnic_list); +DEFINE_SPINLOCK(reset_fnic_list_lock); /* Supported devices by fnic module */ static struct pci_device_id fnic_id_table[] = { @@ -69,6 +78,14 @@ unsigned int fnic_log_level; module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); +unsigned int fnic_fdmi_support = 1; +module_param(fnic_fdmi_support, int, 0644); +MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support"); + +static unsigned int fnic_tgt_id_binding = 1; +module_param(fnic_tgt_id_binding, uint, 0644); +MODULE_PARM_DESC(fnic_tgt_id_binding, + "Target ID binding (0 for none. 1 for binding by WWPN (default))"); unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS; module_param(io_completions, int, S_IRUGO|S_IWUSR); @@ -88,13 +105,13 @@ static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); -static struct libfc_function_template fnic_transport_template = { - .frame_send = fnic_send, - .lport_set_port_id = fnic_set_port_id, - .fcp_abort_io = fnic_empty_scsi_cleanup, - .fcp_cleanup = fnic_empty_scsi_cleanup, - .exch_mgr_reset = fnic_exch_mgr_reset -}; +unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON; +module_param(pc_rscn_handling_feature_flag, uint, 0644); +MODULE_PARM_DESC(pc_rscn_handling_feature_flag, + "PCRSCN handling (0 for none. 
1 to handle PCRSCN (default))"); + +struct workqueue_struct *reset_fnic_work_queue; +struct workqueue_struct *fnic_fip_queue; static int fnic_slave_alloc(struct scsi_device *sdev) { @@ -114,7 +131,7 @@ static struct scsi_host_template fnic_host_template = { .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = fnic_abort_cmd, .eh_device_reset_handler = fnic_device_reset, - .eh_host_reset_handler = fnic_host_reset, + .eh_host_reset_handler = fnic_eh_host_reset_handler, .slave_alloc = fnic_slave_alloc, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, @@ -124,6 +141,8 @@ static struct scsi_host_template fnic_host_template = { .max_sectors = 0xffff, .shost_attrs = fnic_attrs, .track_queue_depth = 1, + .cmd_size = sizeof(struct fnic_cmd_priv), + .map_queues = fnic_mq_map_queues_cpus, }; static void @@ -153,7 +172,7 @@ static struct fc_function_template fnic_fc_functions = { .get_host_speed = fnic_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, - .get_host_port_state = fc_get_host_port_state, + .get_host_port_state = fnic_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .show_rport_maxframe_size = 1, @@ -164,54 +183,88 @@ static struct fc_function_template fnic_fc_functions = { .show_starget_port_id = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, - .issue_fc_host_lip = fnic_reset, + .issue_fc_host_lip = fnic_issue_fc_host_lip, .get_fc_host_stats = fnic_get_stats, .reset_fc_host_stats = fnic_reset_host_stats, - .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .dd_fcrport_size = sizeof(struct rport_dd_data_s), .terminate_rport_io = fnic_terminate_rport_io, - .bsg_request = fc_lport_bsg_request, + .bsg_request = NULL, }; static void fnic_get_host_speed(struct Scsi_Host *shost) { - struct fc_lport *lp = shost_priv(shost); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); u32 port_speed = vnic_dev_port_speed(fnic->vdev); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "port_speed: %d Mbps", port_speed); + atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed); /* Add in other values as they get defined in fw */ switch (port_speed) { + case DCEM_PORTSPEED_1G: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case DCEM_PORTSPEED_2G: + fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + break; + case DCEM_PORTSPEED_4G: + fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + break; + case DCEM_PORTSPEED_8G: + fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + break; case DCEM_PORTSPEED_10G: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case DCEM_PORTSPEED_16G: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; case DCEM_PORTSPEED_20G: fc_host_speed(shost) = FC_PORTSPEED_20GBIT; break; case DCEM_PORTSPEED_25G: fc_host_speed(shost) = FC_PORTSPEED_25GBIT; break; + case DCEM_PORTSPEED_32G: + fc_host_speed(shost) = FC_PORTSPEED_32GBIT; + break; case DCEM_PORTSPEED_40G: case DCEM_PORTSPEED_4x10G: fc_host_speed(shost) = FC_PORTSPEED_40GBIT; break; + case DCEM_PORTSPEED_50G: + fc_host_speed(shost) = FC_PORTSPEED_50GBIT; + break; + case DCEM_PORTSPEED_64G: + fc_host_speed(shost) = FC_PORTSPEED_64GBIT; + break; case DCEM_PORTSPEED_100G: fc_host_speed(shost) = FC_PORTSPEED_100GBIT; break; + case DCEM_PORTSPEED_128G: + fc_host_speed(shost) = FC_PORTSPEED_128GBIT; + break; default: + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unknown FC speed: %d Mbps", port_speed); 
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } +/* Placeholder function */ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) { int ret; - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); - struct fc_host_statistics *stats = &lp->host_stats; + struct fnic *fnic = *((struct fnic **) shost_priv(host)); + struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats; struct vnic_stats *vs; unsigned long flags; - if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) + if (time_before + (jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) return stats; fnic->stats_time = jiffies; @@ -220,24 +273,22 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (ret) { - FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, - "fnic: Get vnic stats failed" - " 0x%x", ret); + FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic: Get vnic stats failed: 0x%x", ret); return stats; } vs = fnic->stats; stats->tx_frames = vs->tx.tx_unicast_frames_ok; - stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; + stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; stats->rx_frames = vs->rx.rx_unicast_frames_ok; - stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; + stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; stats->invalid_crc_count = vs->rx.rx_crc_errors; stats->seconds_since_last_reset = - (jiffies - fnic->stats_reset_time) / HZ; + (jiffies - fnic->stats_reset_time) / HZ; stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); - return stats; } @@ -318,8 +369,7 @@ void fnic_dump_fchost_stats(struct Scsi_Host *host, static void fnic_reset_host_stats(struct Scsi_Host *host) { int ret; - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(host)); struct fc_host_statistics *stats; unsigned long flags; @@ -332,7 +382,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host) spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (ret) { - FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic: Reset vnic stats failed" " 0x%x", ret); return; @@ -351,25 +401,19 @@ void fnic_log_q_error(struct fnic *fnic) for (i = 0; i < fnic->raw_wq_count; i++) { error_status = ioread32(&fnic->wq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "WQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "WQ[%d] error_status %d\n", i, error_status); } for (i = 0; i < fnic->rq_count; i++) { error_status = ioread32(&fnic->rq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "RQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "RQ[%d] error_status %d\n", i, error_status); } for (i = 0; i < fnic->wq_copy_count; i++) { - error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status); + error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "CWQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "CWQ[%d] error_status %d\n", i, error_status); } } @@ -400,11 +444,10 @@ static int fnic_notify_set(struct fnic *fnic) err = 
vnic_dev_notify_set(fnic->vdev, -1); break; case VNIC_DEV_INTR_MODE_MSIX: - err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY); + err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base); break; default: - shost_printk(KERN_ERR, fnic->lport->host, - "Interrupt mode should be set up" + dev_err(&fnic->pdev->dev, "Interrupt mode should be set up" " before devcmd notify set %d\n", vnic_dev_get_intr_mode(fnic->vdev)); err = -1; @@ -423,13 +466,6 @@ static void fnic_notify_timer(struct timer_list *t) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } -static void fnic_fip_notify_timer(struct timer_list *t) -{ - struct fnic *fnic = from_timer(fnic, t, fip_timer); - - fnic_handle_fip_timer(fnic); -} - static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -486,6 +522,7 @@ static int fnic_cleanup(struct fnic *fnic) { unsigned int i; int err; + int raw_wq_rq_counts; vnic_dev_disable(fnic->vdev); for (i = 0; i < fnic->intr_count; i++) @@ -502,13 +539,14 @@ static int fnic_cleanup(struct fnic *fnic) return err; } for (i = 0; i < fnic->wq_copy_count; i++) { - err = vnic_wq_copy_disable(&fnic->wq_copy[i]); + err = vnic_wq_copy_disable(&fnic->hw_copy_wq[i]); if (err) return err; + raw_wq_rq_counts = fnic->raw_wq_count + fnic->rq_count; + fnic_wq_copy_cmpl_handler(fnic, -1, i + raw_wq_rq_counts); } /* Clean up completed IOs and FCS frames */ - fnic_wq_copy_cmpl_handler(fnic, io_completions); fnic_wq_cmpl_handler(fnic, -1); fnic_rq_cmpl_handler(fnic, -1); @@ -518,7 +556,7 @@ static int fnic_cleanup(struct fnic *fnic) for (i = 0; i < fnic->rq_count; i++) vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); for (i = 0; i < fnic->wq_copy_count; i++) - vnic_wq_copy_clean(&fnic->wq_copy[i], + vnic_wq_copy_clean(&fnic->hw_copy_wq[i], fnic_wq_copy_cleanup_handler); for (i = 0; i < fnic->cq_count; i++) @@ -527,6 +565,8 @@ static int fnic_cleanup(struct fnic *fnic) vnic_intr_clean(&fnic->intr[i]); mempool_destroy(fnic->io_req_pool); + mempool_destroy(fnic->frame_pool); + mempool_destroy(fnic->frame_elem_pool); for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) mempool_destroy(fnic->io_sgl_pool[i]); @@ -539,98 +579,223 @@ static void fnic_iounmap(struct fnic *fnic) iounmap(fnic->bar0.vaddr); } -/** - * fnic_get_mac() - get assigned data MAC address for FIP code. - * @lport: local port. 
- */ -static u8 *fnic_get_mac(struct fc_lport *lport) +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) { - struct fnic *fnic = lport_priv(lport); + vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} - return fnic->data_src_addr; +static void fnic_scsi_init(struct fnic *fnic) +{ + struct Scsi_Host *host = fnic->host; + + snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, + host->host_no); + + host->transportt = fnic_fc_transport; } -static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +static void fnic_free_ioreq_tables_mq(struct fnic *fnic) { - vnic_dev_set_default_vlan(fnic->vdev, vlan_id); + int hwq; + + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) + kfree(fnic->sw_copy_wq[hwq].io_req_table); +} + +static int fnic_scsi_drv_init(struct fnic *fnic) +{ + struct Scsi_Host *host = fnic->host; + int err; + struct pci_dev *pdev = fnic->pdev; + struct fnic_iport_s *iport = &fnic->iport; + int hwq; + + /* Configure maximum outstanding IO reqs*/ + if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) + host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, + max_t(u32, FNIC_MIN_IO_REQ, + fnic->config.io_throttle_count)); + + fnic->fnic_max_tag_id = host->can_queue; + host->max_lun = fnic->config.luns_per_tgt; + host->max_id = FNIC_MAX_FCP_TARGET; + host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN; + + host->nr_hw_queues = fnic->wq_copy_count; + + dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", + host->can_queue, host->max_lun); + + dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + host->max_id, host->max_cmd_len, host->nr_hw_queues); + + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { + fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; + fnic->sw_copy_wq[hwq].io_req_table = + kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * + sizeof(struct fnic_io_req *), GFP_KERNEL); + + if (!fnic->sw_copy_wq[hwq].io_req_table) { + fnic_free_ioreq_tables_mq(fnic); + return -ENOMEM; + } + } + + dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", + fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); + + fnic_scsi_init(fnic); + + err = scsi_add_host(fnic->host, &pdev->dev); + if (err) { + dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n"); + return err; + } + fc_host_maxframe_size(fnic->host) = iport->max_payload_size; + fc_host_dev_loss_tmo(fnic->host) = + fnic->config.port_down_timeout / 1000; + sprintf(fc_host_symbolic_name(fnic->host), + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT; + fc_host_node_name(fnic->host) = iport->wwnn; + fc_host_port_name(fnic->host) = iport->wwpn; + fc_host_supported_classes(fnic->host) = FC_COS_CLASS3; + memset(fc_host_supported_fc4s(fnic->host), 0, + sizeof(fc_host_supported_fc4s(fnic->host))); + fc_host_supported_fc4s(fnic->host)[2] = 1; + fc_host_supported_fc4s(fnic->host)[7] = 1; + fc_host_supported_speeds(fnic->host) = 0; + fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT; + + dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data); + if (fnic->host->shost_data != NULL) { + if (fnic_tgt_id_binding == 0) { + dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n"); + fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE; + } else { + dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n"); + fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN; + } + } + + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if 
(!fnic->io_req_pool) { + scsi_remove_host(fnic->host); + return -ENOMEM; + } + + return 0; +} + +void fnic_mq_map_queues_cpus(struct Scsi_Host *host) +{ + struct fnic *fnic = *((struct fnic **) shost_priv(host)); + struct pci_dev *l_pdev = fnic->pdev; + int intr_mode = fnic->config.intr_mode; + struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT]; + + if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) { + FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "intr_mode is not msix\n"); + return; + } + + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "qmap->nr_queues: %d\n", qmap->nr_queues); + + if (l_pdev == NULL) { + FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "l_pdev is null\n"); + return; + } + + blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET); } static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct Scsi_Host *host; - struct fc_lport *lp; + struct Scsi_Host *host = NULL; struct fnic *fnic; mempool_t *pool; - int err; + struct fnic_iport_s *iport; + int err = 0; + int fnic_id = 0; int i; unsigned long flags; + char *desc, *subsys_desc; + int len; /* - * Allocate SCSI Host and set up association between host, - * local port, and fnic + * Allocate fnic */ - lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); - if (!lp) { - printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); + fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL); + if (!fnic) { err = -ENOMEM; - goto err_out; + goto err_out_fnic_alloc; } - host = lp->host; - fnic = lport_priv(lp); - fnic->lport = lp; - fnic->ctlr.lp = lp; - - fnic->link_events = 0; - snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, - host->host_no); - - host->transportt = fnic_fc_transport; + iport = &fnic->iport; - fnic_stats_debugfs_init(fnic); - - /* Setup PCI resources */ - pci_set_drvdata(pdev, fnic); + fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL); + if (fnic_id < 0) { + dev_err(&pdev->dev, "Unable to alloc fnic ID\n"); + err = fnic_id; + goto err_out_ida_alloc; + } fnic->pdev = pdev; + fnic->fnic_num = fnic_id; + + /* Find model name from PCIe subsys ID */ + if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) { + dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc); + + /* Update FDMI model */ + fnic->subsys_desc_len = strlen(subsys_desc); + len = ARRAY_SIZE(fnic->subsys_desc); + if (fnic->subsys_desc_len > len) + fnic->subsys_desc_len = len; + memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len); + dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc); + } else { + fnic->subsys_desc_len = 0; + dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown", + pdev->subsystem_device); + } err = pci_enable_device(pdev); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot enable PCI device, aborting.\n"); - goto err_out_free_hba; + dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n"); + goto err_out_pci_enable_device; } err = pci_request_regions(pdev, DRV_NAME); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot enable PCI resources, aborting\n"); - goto err_out_disable_device; + dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n"); + goto err_out_pci_request_regions; } pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 64-bit first, and - * fail to 32-bit. + * limitation for the device. Try 47-bit first, and + * fail to 32-bit. Cisco VIC supports 47 bits only. 
*/ - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47)); if (err) { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "No usable DMA configuration " + dev_err(&fnic->pdev->dev, "No usable DMA configuration " "aborting\n"); - goto err_out_release_regions; + goto err_out_set_dma_mask; } } /* Map vNIC resources from BAR0 */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - shost_printk(KERN_ERR, fnic->lport->host, - "BAR0 not memory-map'able, aborting.\n"); + dev_err(&fnic->pdev->dev, "BAR0 not memory-map'able, aborting.\n"); err = -ENODEV; - goto err_out_release_regions; + goto err_out_map_bar; } fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); @@ -638,93 +803,101 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fnic->bar0.len = pci_resource_len(pdev, 0); if (!fnic->bar0.vaddr) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot memory-map BAR0 res hdr, " + dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, " "aborting.\n"); err = -ENODEV; - goto err_out_release_regions; + goto err_out_fnic_map_bar; } fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); if (!fnic->vdev) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC registration failed, " + dev_err(&fnic->pdev->dev, "vNIC registration failed, " "aborting.\n"); err = -ENODEV; - goto err_out_iounmap; + goto err_out_dev_register; } err = vnic_dev_cmd_init(fnic->vdev); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vnic_dev_cmd_init() returns %d, aborting\n", + dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n", err); - goto err_out_vnic_unregister; + goto err_out_dev_cmd_init; } err = fnic_dev_wait(fnic->vdev, vnic_dev_open, vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC dev open failed, aborting.\n"); - goto err_out_dev_cmd_deinit; + dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n"); + goto err_out_dev_open; } err = vnic_dev_init(fnic->vdev, 0); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC dev init failed, aborting.\n"); - goto err_out_dev_close; + dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n"); + goto err_out_dev_init; } - err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC get MAC addr failed \n"); - goto err_out_dev_close; + dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n"); + goto err_out_dev_mac_addr; } /* set data_src for point-to-point mode and to keep it non-zero */ - memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); + memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN); /* Get vNIC configuration */ err = fnic_get_vnic_config(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Get vNIC configuration failed, " + dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, " "aborting.\n"); - goto err_out_dev_close; + goto err_out_fnic_get_config; } - /* Configure Maximum Outstanding IO reqs*/ - if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) { - host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, - max_t(u32, FNIC_MIN_IO_REQ, - fnic->config.io_throttle_count)); + switch (fnic->config.flags & 0xff0) { + case VFCF_FC_INITIATOR: + { + host = + scsi_host_alloc(&fnic_host_template, + sizeof(struct fnic *)); + if (!host) { + 
dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n"); + err = -ENOMEM; + goto err_out_scsi_host_alloc; + } + *((struct fnic **) shost_priv(host)) = fnic; + + fnic->host = host; + fnic->role = FNIC_ROLE_FCP_INITIATOR; + dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n", + fnic->fnic_num); + } + break; + default: + dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num); + err = -EINVAL; + goto err_out_fnic_role; } - fnic->fnic_max_tag_id = host->can_queue; - host->max_lun = fnic->config.luns_per_tgt; - host->max_id = FNIC_MAX_FCP_TARGET; - host->max_cmd_len = FCOE_MAX_CMD_LEN; + /* Setup PCI resources */ + pci_set_drvdata(pdev, fnic); fnic_get_res_counts(fnic); err = fnic_set_intr_mode(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to set intr mode, " + dev_err(&fnic->pdev->dev, "Failed to set intr mode, " "aborting.\n"); - goto err_out_dev_close; + goto err_out_fnic_set_intr_mode; } err = fnic_alloc_vnic_resources(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to alloc vNIC resources, " + dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, " "aborting.\n"); - goto err_out_clear_intr; + goto err_out_fnic_alloc_vnic_res; } - + dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", + fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); /* initialize all fnic locks */ spin_lock_init(&fnic->fnic_lock); @@ -739,53 +912,56 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fnic->fw_ack_index[i] = -1; } - for (i = 0; i < FNIC_IO_LOCKS; i++) - spin_lock_init(&fnic->io_req_lock[i]); - - err = -ENOMEM; - fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); - if (!fnic->io_req_pool) - goto err_out_free_resources; - pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); - if (!pool) - goto err_out_free_ioreq_pool; + if (!pool) { + err = -ENOMEM; + goto err_out_free_resources; + } fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); - if (!pool) + if (!pool) { + err = -ENOMEM; goto err_out_free_dflt_pool; + } fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; + pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache); + if (!pool) { + err = -ENOMEM; + goto err_out_fdls_frame_pool; + } + fnic->frame_pool = pool; + + pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM, + fdls_frame_elem_cache); + if (!pool) { + err = -ENOMEM; + goto err_out_fdls_frame_elem_pool; + } + fnic->frame_elem_pool = pool; + /* setup vlan config, hw inserts vlan header */ fnic->vlan_hw_insert = 1; fnic->vlan_id = 0; - /* Initialize the FIP fcoe_ctrl struct */ - fnic->ctlr.send = fnic_eth_send; - fnic->ctlr.update_mac = fnic_update_mac; - fnic->ctlr.get_src_addr = fnic_get_mac; if (fnic->config.flags & VFCF_FIP_CAPABLE) { - shost_printk(KERN_INFO, fnic->lport->host, - "firmware supports FIP\n"); + dev_info(&fnic->pdev->dev, "firmware supports FIP\n"); /* enable directed and multicast */ vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); - vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); - fnic->set_vlan = fnic_set_vlan; - fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); - timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0); + vnic_dev_add_addr(fnic->vdev, iport->hwmac); spin_lock_init(&fnic->vlans_lock); INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); - INIT_WORK(&fnic->event_work, fnic_handle_event); - 
skb_queue_head_init(&fnic->fip_frame_queue); - INIT_LIST_HEAD(&fnic->evlist); - INIT_LIST_HEAD(&fnic->vlans); + INIT_LIST_HEAD(&fnic->fip_frame_queue); + INIT_LIST_HEAD(&fnic->vlan_list); + timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0); + timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0); + timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0); + timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0); + fnic->set_vlan = fnic_set_vlan; } else { - shost_printk(KERN_INFO, fnic->lport->host, - "firmware uses non-FIP mode\n"); - fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); - fnic->ctlr.state = FIP_ST_NON_FIP; + dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n"); } fnic->state = FNIC_IN_FC_MODE; @@ -798,9 +974,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup notification buffer area */ err = fnic_notify_set(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to alloc notify buffer, aborting.\n"); - goto err_out_free_max_pool; + dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n"); + goto err_out_fnic_notify_set; } /* Setup notify timer when using MSI interrupts */ @@ -809,186 +984,199 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* allocate RQ buffers and post them to RQ*/ for (i = 0; i < fnic->rq_count; i++) { - vnic_rq_enable(&fnic->rq[i]); err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic_alloc_rq_frame can't alloc " + dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc " "frame\n"); - goto err_out_free_rq_buf; + goto err_out_alloc_rq_buf; } } - /* - * Initialization done with PCI system, hardware, firmware. - * Add host to SCSI - */ - err = scsi_add_host(lp->host, &pdev->dev); - if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic: scsi_add_host failed...exiting\n"); - goto err_out_free_rq_buf; - } - - /* Start local port initiatialization */ + init_completion(&fnic->reset_completion_wait); - lp->link_up = 0; - - lp->max_retry_count = fnic->config.flogi_retries; - lp->max_rport_retry_count = fnic->config.plogi_retries; - lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | - FCP_SPPF_CONF_COMPL); + /* Start local port initialization */ + iport->max_flogi_retries = fnic->config.flogi_retries; + iport->max_plogi_retries = fnic->config.plogi_retries; + iport->plogi_timeout = fnic->config.plogi_timeout; + iport->service_params = + (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS | + FNIC_FCP_SP_CONF_CMPL); if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) - lp->service_params |= FCP_SPPF_RETRY; + iport->service_params |= FNIC_FCP_SP_RETRY; - lp->boot_time = jiffies; - lp->e_d_tov = fnic->config.ed_tov; - lp->r_a_tov = fnic->config.ra_tov; - lp->link_supported_speeds = FC_PORTSPEED_10GBIT; - fc_set_wwnn(lp, fnic->config.node_wwn); - fc_set_wwpn(lp, fnic->config.port_wwn); + iport->boot_time = jiffies; + iport->e_d_tov = fnic->config.ed_tov; + iport->r_a_tov = fnic->config.ra_tov; + iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT; + iport->wwpn = fnic->config.port_wwn; + iport->wwnn = fnic->config.node_wwn; - fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); + iport->max_payload_size = fnic->config.maxdatafieldsize; - if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, - FCPIO_HOST_EXCH_RANGE_END, NULL)) { - err = -ENOMEM; - goto err_out_remove_scsi_host; + if ((iport->max_payload_size < 
FNIC_MIN_DATA_FIELD_SIZE) || + (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) || + ((iport->max_payload_size % 4) != 0)) { + iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN; } - fc_lport_init_stats(lp); - fnic->stats_reset_time = jiffies; - - fc_lport_config(lp); - - if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + - sizeof(struct fc_frame_header))) { - err = -EINVAL; - goto err_out_free_exch_mgr; - } - fc_host_maxframe_size(lp->host) = lp->mfs; - fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; + iport->flags |= FNIC_FIRST_LINK_UP; - sprintf(fc_host_symbolic_name(lp->host), - DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback, + 0); - spin_lock_irqsave(&fnic_list_lock, flags); - list_add_tail(&fnic->list, &fnic_list); - spin_unlock_irqrestore(&fnic_list_lock, flags); + fnic->stats_reset_time = jiffies; INIT_WORK(&fnic->link_work, fnic_handle_link); INIT_WORK(&fnic->frame_work, fnic_handle_frame); - skb_queue_head_init(&fnic->frame_queue); - skb_queue_head_init(&fnic->tx_queue); + INIT_WORK(&fnic->tport_work, fnic_tport_event_handler); + INIT_WORK(&fnic->flush_work, fnic_flush_tx); + + INIT_LIST_HEAD(&fnic->frame_queue); + INIT_LIST_HEAD(&fnic->tx_queue); + INIT_LIST_HEAD(&fnic->tport_event_list); + + INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry, + fdls_schedule_oxid_free_retry_work); + + /* Initialize the oxid reclaim list and work struct */ + INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list); + INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler); /* Enable all queues */ for (i = 0; i < fnic->raw_wq_count; i++) vnic_wq_enable(&fnic->wq[i]); + for (i = 0; i < fnic->rq_count; i++) { + if (!ioread32(&fnic->rq[i].ctrl->enable)) + vnic_rq_enable(&fnic->rq[i]); + } for (i = 0; i < fnic->wq_copy_count; i++) - vnic_wq_copy_enable(&fnic->wq_copy[i]); + vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); - fc_fabric_login(lp); + vnic_dev_enable(fnic->vdev); err = fnic_request_intr(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Unable to request irq.\n"); - goto err_out_free_exch_mgr; + dev_err(&fnic->pdev->dev, "Unable to request irq.\n"); + goto err_out_fnic_request_intr; } - vnic_dev_enable(fnic->vdev); + fnic_notify_timer_start(fnic); + + fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE)); + + err = fnic_scsi_drv_init(fnic); + if (err) + goto err_out_scsi_drv_init; + + err = fnic_stats_debugfs_init(fnic); + if (err) { + dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n"); + goto err_out_free_stats_debugfs; + } for (i = 0; i < fnic->intr_count; i++) vnic_intr_unmask(&fnic->intr[i]); - fnic_notify_timer_start(fnic); + spin_lock_irqsave(&fnic_list_lock, flags); + list_add_tail(&fnic->list, &fnic_list); + spin_unlock_irqrestore(&fnic_list_lock, flags); return 0; -err_out_free_exch_mgr: - fc_exch_mgr_free(lp); -err_out_remove_scsi_host: - fc_remove_host(lp->host); - scsi_remove_host(lp->host); -err_out_free_rq_buf: - for (i = 0; i < fnic->rq_count; i++) +err_out_free_stats_debugfs: + fnic_stats_debugfs_remove(fnic); + fnic_free_ioreq_tables_mq(fnic); + scsi_remove_host(fnic->host); +err_out_scsi_drv_init: + fnic_free_intr(fnic); +err_out_fnic_request_intr: +err_out_alloc_rq_buf: + for (i = 0; i < fnic->rq_count; i++) { + if (ioread32(&fnic->rq[i].ctrl->enable)) + vnic_rq_disable(&fnic->rq[i]); vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); + } vnic_dev_notify_unset(fnic->vdev); -err_out_free_max_pool: 
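The relabelled unwind ladder running through this hunk follows the kernel's usual goto convention: each err_out_* label names the step that failed, control falls through every label below it, and resources are released in exact reverse order of acquisition. A minimal sketch of the idiom, with hypothetical names rather than fnic's actual steps:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);			/* step 1 */
	if (err)
		return err;				/* nothing acquired yet */

	err = pci_request_regions(pdev, "example");	/* step 2 */
	if (err)
		goto err_out_pci_request_regions;	/* label names the failing step */

	/* each later step would add a matching label below */
	return 0;

err_out_pci_request_regions:
	pci_disable_device(pdev);	/* undo step 1; step 2 acquired nothing */
	return err;
}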
+err_out_fnic_notify_set: + mempool_destroy(fnic->frame_elem_pool); +err_out_fdls_frame_elem_pool: + mempool_destroy(fnic->frame_pool); +err_out_fdls_frame_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); err_out_free_dflt_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); -err_out_free_ioreq_pool: - mempool_destroy(fnic->io_req_pool); err_out_free_resources: fnic_free_vnic_resources(fnic); -err_out_clear_intr: +err_out_fnic_alloc_vnic_res: fnic_clear_intr_mode(fnic); -err_out_dev_close: +err_out_fnic_set_intr_mode: + scsi_host_put(fnic->host); +err_out_fnic_role: +err_out_scsi_host_alloc: +err_out_fnic_get_config: +err_out_dev_mac_addr: +err_out_dev_init: vnic_dev_close(fnic->vdev); -err_out_dev_cmd_deinit: -err_out_vnic_unregister: +err_out_dev_open: +err_out_dev_cmd_init: vnic_dev_unregister(fnic->vdev); -err_out_iounmap: +err_out_dev_register: fnic_iounmap(fnic); -err_out_release_regions: +err_out_fnic_map_bar: +err_out_map_bar: +err_out_set_dma_mask: pci_release_regions(pdev); -err_out_disable_device: +err_out_pci_request_regions: pci_disable_device(pdev); -err_out_free_hba: - fnic_stats_debugfs_remove(fnic); - scsi_host_put(lp->host); -err_out: +err_out_pci_enable_device: + ida_free(&fnic_ida, fnic->fnic_num); +err_out_ida_alloc: + kfree(fnic); +err_out_fnic_alloc: return err; } static void fnic_remove(struct pci_dev *pdev) { struct fnic *fnic = pci_get_drvdata(pdev); - struct fc_lport *lp = fnic->lport; unsigned long flags; /* - * Mark state so that the workqueue thread stops forwarding - * received frames and link events to the local port. ISR and - * other threads that can queue work items will also stop - * creating work items on the fnic workqueue */ + * Sometimes when probe() fails and does not exit with an error code, + * remove() gets called with 'drvdata' not set. Avoid a crash by + * adding a defensive check. + */ + if (!fnic) + return; + spin_lock_irqsave(&fnic->fnic_lock, flags); fnic->stop_rx_link_events = 1; spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) - del_timer_sync(&fnic->notify_timer); - /* * Flush the fnic event queue. After this call, there should * be no event queued for this fnic device in the workqueue */ flush_workqueue(fnic_event_queue); - skb_queue_purge(&fnic->frame_queue); - skb_queue_purge(&fnic->tx_queue); + + fnic_scsi_unload(fnic); + + if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) + del_timer_sync(&fnic->notify_timer); if (fnic->config.flags & VFCF_FIP_CAPABLE) { - del_timer_sync(&fnic->fip_timer); - skb_queue_purge(&fnic->fip_frame_queue); + del_timer_sync(&fnic->retry_fip_timer); + del_timer_sync(&fnic->fcs_ka_timer); + del_timer_sync(&fnic->enode_ka_timer); + del_timer_sync(&fnic->vn_ka_timer); + + fnic_free_txq(&fnic->fip_frame_queue); fnic_fcoe_reset_vlans(fnic); - fnic_fcoe_evlist_free(fnic); } - /* - * Log off the fabric. This stops all remote ports, dns port, - * logs off the fabric.
This flushes all rport, disc, lport work - * before returning - */ - fc_fabric_logoff(fnic->lport); - - spin_lock_irqsave(&fnic->fnic_lock, flags); - fnic->in_remove = 1; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) + del_timer_sync(&fnic->iport.fabric.fdmi_timer); - fcoe_ctlr_destroy(&fnic->ctlr); - fc_lport_destroy(lp); fnic_stats_debugfs_remove(fnic); /* @@ -998,16 +1186,13 @@ static void fnic_remove(struct pci_dev *pdev) */ fnic_cleanup(fnic); - BUG_ON(!skb_queue_empty(&fnic->frame_queue)); - BUG_ON(!skb_queue_empty(&fnic->tx_queue)); - spin_lock_irqsave(&fnic_list_lock, flags); list_del(&fnic->list); spin_unlock_irqrestore(&fnic_list_lock, flags); - fc_remove_host(fnic->lport->host); - scsi_remove_host(fnic->lport->host); - fc_exch_mgr_free(fnic->lport); + fnic_free_txq(&fnic->frame_queue); + fnic_free_txq(&fnic->tx_queue); + vnic_dev_notify_unset(fnic->vdev); fnic_free_intr(fnic); fnic_free_vnic_resources(fnic); @@ -1017,7 +1202,11 @@ static void fnic_remove(struct pci_dev *pdev) fnic_iounmap(fnic); pci_release_regions(pdev); pci_disable_device(pdev); - scsi_host_put(lp->host); + pci_set_drvdata(pdev, NULL); + ida_free(&fnic_ida, fnic->fnic_num); + fnic_scsi_unload_cleanup(fnic); + scsi_host_put(fnic->host); + kfree(fnic); } static struct pci_driver fnic_driver = { @@ -1093,6 +1282,24 @@ static int __init fnic_init_module(void) goto err_create_fnic_ioreq_slab; } + fdls_frame_cache = kmem_cache_create("fdls_frames", + FNIC_FCOE_FRAME_MAXSZ, + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fdls_frame_cache) { + pr_err("fnic fdls frame cache create failed\n"); + err = -ENOMEM; + goto err_create_fdls_frame_cache; + } + + fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem", + sizeof(struct fnic_frame_list), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fdls_frame_elem_cache) { + pr_err("fnic fdls frame elem cache create failed\n"); + err = -ENOMEM; + goto err_create_fdls_frame_cache_elem; + } + fnic_event_queue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq"); if (!fnic_event_queue) { @@ -1109,6 +1316,19 @@ static int __init fnic_init_module(void) goto err_create_fip_workq; } + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) { + reset_fnic_work_queue = + create_singlethread_workqueue("reset_fnic_work_queue"); + if (!reset_fnic_work_queue) { + pr_err("reset fnic work queue create failed\n"); + err = -ENOMEM; + goto err_create_reset_fnic_workq; + } + spin_lock_init(&reset_fnic_list_lock); + INIT_LIST_HEAD(&reset_fnic_list); + INIT_WORK(&reset_fnic_work, fnic_reset_work_handler); + } + fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); if (!fnic_fc_transport) { printk(KERN_ERR PFX "fc_attach_transport error\n"); @@ -1129,8 +1349,15 @@ static int __init fnic_init_module(void) err_fc_transport: destroy_workqueue(fnic_fip_queue); err_create_fip_workq: + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) + destroy_workqueue(reset_fnic_work_queue); +err_create_reset_fnic_workq: destroy_workqueue(fnic_event_queue); err_create_fnic_workq: + kmem_cache_destroy(fdls_frame_elem_cache); +err_create_fdls_frame_cache_elem: + kmem_cache_destroy(fdls_frame_cache); +err_create_fdls_frame_cache: kmem_cache_destroy(fnic_io_req_cache); err_create_fnic_ioreq_slab: kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); @@ -1147,17 +1374,22 @@ static void __exit fnic_cleanup_module(void) { pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); - if (fnic_fip_queue) { 
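fnic_init_module() pairs each kmem_cache_create() above with a kmem_cache_destroy() here, and fnic_probe() layers mempools over the fdls frame caches so frame allocation keeps a guaranteed reserve under memory pressure. A minimal sketch of that slab-cache/mempool pairing, with hypothetical names and an assumed object size:

#include <linux/slab.h>
#include <linux/mempool.h>

static struct kmem_cache *example_frame_cache;
static mempool_t *example_frame_pool;

static int example_setup(void)
{
	/* backing slab cache: one fixed object size for all frames */
	example_frame_cache = kmem_cache_create("example_frames", 256, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!example_frame_cache)
		return -ENOMEM;

	/* the pool pre-reserves two objects, so blocking allocations
	 * can stall but never fail outright when memory is tight */
	example_frame_pool = mempool_create_slab_pool(2, example_frame_cache);
	if (!example_frame_pool) {
		kmem_cache_destroy(example_frame_cache);
		return -ENOMEM;
	}
	return 0;
}

static void example_teardown(void)
{
	mempool_destroy(example_frame_pool);		/* pool first... */
	kmem_cache_destroy(example_frame_cache);	/* ...then its backing cache */
}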
- flush_workqueue(fnic_fip_queue); + + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) + destroy_workqueue(reset_fnic_work_queue); + + if (fnic_fip_queue) destroy_workqueue(fnic_fip_queue); - } + kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); + kmem_cache_destroy(fdls_frame_cache); fc_release_transport(fnic_fc_transport); fnic_trace_free(); fnic_fc_trace_free(); fnic_debugfs_terminate(); + ida_destroy(&fnic_ida); } module_init(fnic_init_module); diff --git a/drivers/scsi/fnic/fnic_pci_subsys_devid.c b/drivers/scsi/fnic/fnic_pci_subsys_devid.c new file mode 100644 index 0000000000000..36a2c12684228 --- /dev/null +++ b/drivers/scsi/fnic/fnic_pci_subsys_devid.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fnic.h" + +static struct fnic_pcie_device fnic_pcie_device_table[] = { + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA, + "VIC 1280"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI, + "VIC 1240"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE, + "VIC 1285"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE, + "VIC 1227T"}, + + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA, + "VIC 1340"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW, + "VIC 1380"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN, + "C3260-SIOC"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE, + "VIC 1385"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2, + "C3260-SIOC"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT, + "VIC 1387"}, + + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY, + "VIC 1457"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE, + "VIC 1485"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA, + "VIC 1495"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT, + "VIC 1497"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE, + "VIC 1467"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON, + "VIC 1477"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"}, + + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN, + "VIC 15420"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, 
"Beverly", + PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW, + "VIC 15411"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU, + "VIC 15238"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA, + "VIC 15422"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH, + "VIC 15230"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA, + "VIC 15427"}, + + {0,} +}; + +int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, + char **subsys_desc) +{ + unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC; + int max = ARRAY_SIZE(fnic_pcie_device_table); + struct fnic_pcie_device *t = fnic_pcie_device_table; + int index = 0; + + if (pdev->device != device) + return 1; + + while (t->device != 0) { + if (memcmp + ((char *) &pdev->subsystem_device, + (char *) &t->subsystem_device, sizeof(short)) == 0) + break; + t++; + index++; + } + + if (index >= max - 1) { + *desc = NULL; + *subsys_desc = NULL; + return 1; + } + + *desc = fnic_pcie_device_table[index].desc; + *subsys_desc = fnic_pcie_device_table[index].subsys_desc; + return 0; +} diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c index 50488f8e169dc..91f9a00df1d80 100644 --- a/drivers/scsi/fnic/fnic_res.c +++ b/drivers/scsi/fnic/fnic_res.c @@ -42,9 +42,7 @@ int fnic_get_vnic_config(struct fnic *fnic) offsetof(struct vnic_fc_config, m), \ sizeof(c->m), &c->m); \ if (err) { \ - shost_printk(KERN_ERR, fnic->lport->host, \ - "Error getting %s, %d\n", #m, \ - err); \ + dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \ return err; \ } \ } while (0); @@ -69,6 +67,13 @@ int fnic_get_vnic_config(struct fnic *fnic) GET_CONFIG(port_down_timeout); GET_CONFIG(port_down_io_retries); GET_CONFIG(luns_per_tgt); + GET_CONFIG(intr_mode); + GET_CONFIG(wq_copy_count); + + if ((c->flags & (VFCF_FC_INITIATOR)) == 0) { + dev_info(&fnic->pdev->dev, "vNIC role not defined (def role: FC Init)\n"); + c->flags |= VFCF_FC_INITIATOR; + } c->wq_enet_desc_count = min_t(u32, VNIC_FNIC_WQ_DESCS_MAX, @@ -143,36 +148,34 @@ int fnic_get_vnic_config(struct fnic *fnic) c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); c->intr_timer_type = c->intr_timer_type; - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC MAC addr %pM " - "wq/wq_copy/rq %d/%d/%d\n", - fnic->ctlr.ctl_src_addr, + /* for older firmware, GET_CONFIG will not return anything */ + if (c->wq_copy_count == 0) + c->wq_copy_count = 1; + + c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count); + + dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n", + fnic->data_src_addr, c->wq_enet_desc_count, c->wq_copy_desc_count, c->rq_desc_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC node wwn %llx port wwn %llx\n", + dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n", c->node_wwn, c->port_wwn); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC ed_tov %d ra_tov %d\n", + dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n", c->ed_tov, c->ra_tov); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC 
mtu %d intr timer %d\n", + dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n", c->maxdatafieldsize, c->intr_timer); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC flags 0x%x luns per tgt %d\n", + dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n", c->flags, c->luns_per_tgt); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC flogi_retries %d flogi timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n", c->flogi_retries, c->flogi_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC plogi retries %d plogi timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n", c->plogi_retries, c->plogi_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC io throttle count %d link dn timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n", c->io_throttle_count, c->link_down_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC port dn io retries %d port dn timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n", c->port_down_io_retries, c->port_down_timeout); + dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count); + dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode); return 0; } @@ -199,12 +202,19 @@ int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu, void fnic_get_res_counts(struct fnic *fnic) { fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ); - fnic->raw_wq_count = fnic->wq_count - 1; - fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count; + fnic->raw_wq_count = 1; + fnic->wq_copy_count = fnic->config.wq_copy_count; fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ); fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ); fnic->intr_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_INTR_CTRL); + + dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count); } void fnic_free_vnic_resources(struct fnic *fnic) @@ -215,7 +225,7 @@ void fnic_free_vnic_resources(struct fnic *fnic) vnic_wq_free(&fnic->wq[i]); for (i = 0; i < fnic->wq_copy_count; i++) - vnic_wq_copy_free(&fnic->wq_copy[i]); + vnic_wq_copy_free(&fnic->hw_copy_wq[i]); for (i = 0; i < fnic->rq_count; i++) vnic_rq_free(&fnic->rq[i]); @@ -240,16 +250,19 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) intr_mode = vnic_dev_get_intr_mode(fnic->vdev); - shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n", + dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n", intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
"MSI-X" : "unknown"); - shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: " - "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n", - fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count, - fnic->rq_count, fnic->cq_count, fnic->intr_count); + dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d", + fnic->wq_count, fnic->wq_copy_count, + fnic->raw_wq_count, fnic->rq_count); + + dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n", + fnic->cq_count, fnic->intr_count, + fnic->config.wq_copy_desc_count); /* Allocate Raw WQ used for FCS frames */ for (i = 0; i < fnic->raw_wq_count; i++) { @@ -262,7 +275,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) /* Allocate Copy WQs used for SCSI IOs */ for (i = 0; i < fnic->wq_copy_count; i++) { - err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i], + err = vnic_wq_copy_alloc(fnic->vdev, &fnic->hw_copy_wq[i], (fnic->raw_wq_count + i), fnic->config.wq_copy_desc_count, sizeof(struct fcpio_host_req)); @@ -322,8 +335,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) RES_TYPE_INTR_PBA_LEGACY, 0); if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to hook legacy pba resource\n"); + dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n"); err = -ENODEV; goto err_out_cleanup; } @@ -369,7 +381,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) } for (i = 0; i < fnic->wq_copy_count; i++) { - vnic_wq_copy_init(&fnic->wq_copy[i], + vnic_wq_copy_init(&fnic->hw_copy_wq[i], 0 /* cq_index 0 - always */, error_interrupt_enable, error_interrupt_offset); @@ -426,8 +438,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) /* init the stats memory by making the first call here */ err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vnic_dev_stats_dump failed - x%x\n", err); + dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err); goto err_out_cleanup; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index f6aadfd9405db..5e5c7636a07a4 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -35,11 +35,13 @@ #include #include #include -#include #include +#include #include "fnic_io.h" #include "fnic.h" +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id); + const char *fnic_state_str[] = { [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE", [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE", @@ -77,6 +79,18 @@ static const char *fcpio_status_str[] = { [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND", }; +enum terminate_io_return { + TERM_SUCCESS = 0, + TERM_NO_SC = 1, + TERM_IO_REQ_NOT_FOUND, + TERM_ANOTHER_PORT, + TERM_GSTATE, + TERM_IO_BLOCKED, + TERM_OUT_OF_WQ_DESC, + TERM_TIMED_OUT, + TERM_MISC, +}; + const char *fnic_state_to_str(unsigned int state) { if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) @@ -102,22 +116,6 @@ static const char *fnic_fcpio_status_to_str(unsigned int status) return fcpio_status_str[status]; } -static void fnic_cleanup_io(struct fnic *fnic); - -static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, - struct scsi_cmnd *sc) -{ - u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1); - - return &fnic->io_req_lock[hash]; -} - -static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, - int tag) -{ - return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; -} - /* * Unmap the data buffer and sense buffer for an io_req, * also unmap and free the 
device-private scatter/gather list. @@ -140,24 +138,83 @@ static void fnic_release_ioreq_buf(struct fnic *fnic, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); } +static bool +fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2) +{ + u32 *portid = data1; + unsigned int *count = data2; + struct fnic_io_req *io_req = fnic_priv(sc)->io_req; + + if (!io_req || (*portid && (io_req->port_id != *portid))) + return true; + + *count += 1; + return true; +} + +unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid) +{ + unsigned int count = 0; + + fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter, + &portid, &count); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "portid = 0x%x count = %u\n", portid, count); + return count; +} + +unsigned int fnic_count_all_ioreqs(struct fnic *fnic) +{ + return fnic_count_ioreqs(fnic, 0); +} + +static bool +fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2) +{ + struct scsi_device *scsi_device = data1; + unsigned int *count = data2; + + if (sc->device != scsi_device || !fnic_priv(sc)->io_req) + return true; + + *count += 1; + return true; +} + +unsigned int +fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device) +{ + unsigned int count = 0; + + fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter, + scsi_device, &count); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "lun = %p count = %u\n", scsi_device, count); + return count; +} + /* Free up Copy Wq descriptors. Called with copy_wq lock held */ -static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) +static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq) { /* if no Ack received from firmware, then nothing to clean */ - if (!fnic->fw_ack_recd[0]) + if (!fnic->fw_ack_recd[hwq]) return 1; /* * Update desc_available count based on number of freed descriptors * Account for wraparound */ - if (wq->to_clean_index <= fnic->fw_ack_index[0]) - wq->ring.desc_avail += (fnic->fw_ack_index[0] + if (wq->to_clean_index <= fnic->fw_ack_index[hwq]) + wq->ring.desc_avail += (fnic->fw_ack_index[hwq] - wq->to_clean_index + 1); else wq->ring.desc_avail += (wq->ring.desc_count - wq->to_clean_index - + fnic->fw_ack_index[0] + 1); + + fnic->fw_ack_index[hwq] + 1); /* * just bump clean index to ack_index+1 accounting for wraparound @@ -165,10 +222,10 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) * to_clean_index and fw_ack_index, both inclusive */ wq->to_clean_index = - (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; + (fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count; /* we have processed the acks received so far */ - fnic->fw_ack_recd[0] = 0; + fnic->fw_ack_recd[hwq] = 0; return 0; } @@ -182,17 +239,14 @@ __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags, unsigned long clearbits) { unsigned long flags = 0; - unsigned long host_lock_flags = 0; spin_lock_irqsave(&fnic->fnic_lock, flags); - spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags); if (clearbits) fnic->state_flags &= ~st_flags; else fnic->state_flags |= st_flags; - spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags); spin_unlock_irqrestore(&fnic->fnic_lock, flags); return; @@ -205,15 +259,14 @@ __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags, */ int fnic_fw_reset_handler(struct fnic *fnic) { - struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; int 
ret = 0; unsigned long flags; + unsigned int ioreq_count; /* indicate fwreset to io path */ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); - - skb_queue_purge(&fnic->frame_queue); - skb_queue_purge(&fnic->tx_queue); + ioreq_count = fnic_count_all_ioreqs(fnic); /* wait for io cmpl */ while (atomic_read(&fnic->in_flight)) @@ -222,11 +275,13 @@ int fnic_fw_reset_handler(struct fnic *fnic) spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) - free_wq_copy_descs(fnic, wq); + free_wq_copy_descs(fnic, wq, 0); if (!vnic_wq_copy_desc_avail(wq)) ret = -EAGAIN; else { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ioreq_count: %u\n", ioreq_count); fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > @@ -240,12 +295,12 @@ int fnic_fw_reset_handler(struct fnic *fnic) if (!ret) { atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Issued fw reset\n"); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Issued fw reset\n"); } else { fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Failed to issue fw reset\n"); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to issue fw reset\n"); } return ret; @@ -258,45 +313,40 @@ int fnic_fw_reset_handler(struct fnic *fnic) */ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) { - struct vnic_wq_copy *wq = &fnic->wq_copy[0]; + struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; enum fcpio_flogi_reg_format_type format; - struct fc_lport *lp = fnic->lport; u8 gw_mac[ETH_ALEN]; int ret = 0; unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) - free_wq_copy_descs(fnic, wq); + free_wq_copy_descs(fnic, wq, 0); if (!vnic_wq_copy_desc_avail(wq)) { ret = -EAGAIN; goto flogi_reg_ioreq_end; } - if (fnic->ctlr.map_dest) { - eth_broadcast_addr(gw_mac); - format = FCPIO_FLOGI_REG_DEF_DEST; - } else { - memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN); - format = FCPIO_FLOGI_REG_GW_DEST; - } + memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN); + format = FCPIO_FLOGI_REG_GW_DEST; - if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { + if (fnic->config.flags & VFCF_FIP_CAPABLE) { fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, fc_id, gw_mac, - fnic->data_src_addr, - lp->r_a_tov, lp->e_d_tov); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", - fc_id, fnic->data_src_addr, gw_mac); + fnic->iport.fpma, + iport->r_a_tov, iport->e_d_tov); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n", + fc_id, fnic->iport.fpma, gw_mac); } else { fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, format, fc_id, gw_mac); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "FLOGI reg issued fcid %x map %d dest %pM\n", - fc_id, fnic->ctlr.map_dest, gw_mac); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI reg issued fcid 0x%x dest %p\n", + fc_id, gw_mac); } atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); @@ -318,18 +368,23 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, - int sg_count) + int sg_count, + uint32_t mqtag, + uint16_t hwq) { struct 
scatterlist *sg; struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); - struct fc_rport_libfc_priv *rp = rport->dd_data; struct host_sg_desc *desc; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned int i; - unsigned long intr_flags; int flags; u8 exch_flags; struct scsi_lun fc_lun; + struct fnic_tport_s *tport; + struct rport_dd_data_s *rdd_data; + + rdd_data = rport->dd_data; + tport = rdd_data->tport; if (sg_count) { /* For each SGE, create a device desc entry */ @@ -366,14 +421,11 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, int_to_scsilun(sc->device->lun, &fc_lun); /* Enqueue the descriptor in the Copy WQ */ - spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); - - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) - free_wq_copy_descs(fnic, wq); + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + free_wq_copy_descs(fnic, wq, hwq); if (unlikely(!vnic_wq_copy_desc_avail(wq))) { - spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "fnic_queue_wq_copy_desc failure - no descriptors\n"); atomic64_inc(&misc_stats->io_cpwq_alloc_failures); return SCSI_MLQUEUE_HOST_BUSY; @@ -387,10 +439,10 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, exch_flags = 0; if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && - (rp->flags & FC_RP_FLAGS_RETRY)) + (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY)) exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; - fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag, + fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag, 0, exch_flags, io_req->sgl_cnt, SCSI_SENSE_BUFFERSIZE, io_req->sgl_list_pa, @@ -402,8 +454,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, sc->cmnd, sc->cmd_len, scsi_bufflen(sc), fc_lun.scsi_lun, io_req->port_id, - rport->maxframe_size, rp->r_a_tov, - rp->e_d_tov); + tport->max_payload_size, + tport->r_a_tov, tport->e_d_tov); atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > @@ -411,42 +463,34 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); - spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); return 0; } -/* - * fnic_queuecommand - * Routine to send a scsi cdb - * Called with host_lock held and interrupts disabled. 
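The multiqueue rewrite below keys the I/O path off the block layer's unique tag instead of one host-wide lock: the unique tag packs the hardware queue index into its upper 16 bits and the per-queue tag into its lower 16 bits. A minimal sketch of the decomposition these paths rely on, assuming only the stock blk-mq helpers (the example_ name is hypothetical):

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

/* Split a blk-mq unique tag into (hwq, per-queue tag); the rewritten
 * paths use hwq to select wq_copy_lock[hwq] and sw_copy_wq[hwq]. */
static inline void example_decode_mqtag(struct scsi_cmnd *sc,
					uint16_t *hwq, uint16_t *tag)
{
	uint32_t mqtag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));

	*hwq = blk_mq_unique_tag_to_hwq(mqtag);	/* upper 16 bits */
	*tag = blk_mq_unique_tag_to_tag(mqtag);	/* lower 16 bits */
}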
- */ -static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) +int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) { - const int tag = scsi_cmd_to_rq(sc)->tag; - struct fc_lport *lp = shost_priv(sc->device->host); + struct request *const rq = scsi_cmd_to_rq(sc); + uint32_t mqtag = 0; + void (*done)(struct scsi_cmnd *) = scsi_done; struct fc_rport *rport; struct fnic_io_req *io_req = NULL; - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host)); + struct fnic_iport_s *iport = NULL; struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct vnic_wq_copy *wq; - int ret; + int ret = 1; u64 cmd_trace; int sg_count = 0; unsigned long flags = 0; unsigned long ptr; - spinlock_t *io_lock = NULL; int io_lock_acquired = 0; - struct fc_rport_libfc_priv *rp; - - if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) - return SCSI_MLQUEUE_HOST_BUSY; - - if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) - return SCSI_MLQUEUE_HOST_BUSY; + uint16_t hwq = 0; + struct fnic_tport_s *tport = NULL; + struct rport_dd_data_s *rdd_data; + uint16_t lun0_delay = 0; rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "returning DID_NO_CONNECT for IO as rport is NULL\n"); sc->result = DID_NO_CONNECT << 16; done(sc); @@ -455,49 +499,98 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ ret = fc_remote_port_chkready(rport); if (ret) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "rport is not ready\n"); - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); sc->result = ret; done(sc); return 0; } - rp = rport->dd_data; - if (!rp || rp->rp_state == RPORT_ST_DELETE) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "rport 0x%x removed, returning DID_NO_CONNECT\n", - rport->port_id); + mqtag = blk_mq_unique_tag(rq); + spin_lock_irqsave(&fnic->fnic_lock, flags); + iport = &fnic->iport; - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); - sc->result = DID_NO_CONNECT<<16; + if (iport->state != FNIC_IPORT_STATE_READY) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_NO_CONNECT for IO as iport state: %d\n", + iport->state); + sc->result = DID_NO_CONNECT << 16; done(sc); return 0; } - if (rp->rp_state != RPORT_ST_READY) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", - rport->port_id, rp->rp_state); + /* fc_remote_port_add() may have added the tport to + * fc_transport but dd_data not yet set + */ + rdd_data = rport->dd_data; + tport = rdd_data->tport; + if (!tport || (rdd_data->iport != iport)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "dd_data not yet set in SCSI for rport portid: 0x%x\n", + rport->port_id); + tport = fnic_find_tport_by_fcid(iport, rport->port_id); + if (!tport) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n", + rport->port_id); + sc->result = DID_BUS_BUSY << 16; + done(sc); + return 0; + } - sc->result = DID_IMM_RETRY << 16; + /* Re-assign same params as in fnic_fdls_add_tport */ + rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; + 
rport->supported_classes = + FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; + /* the dd_data is allocated by fctransport of size dd_fcrport_size */ + rdd_data = rport->dd_data; + rdd_data->tport = tport; + rdd_data->iport = iport; + tport->rport = rport; + tport->flags |= FNIC_FDLS_SCSI_REGISTERED; + } + + if ((tport->state != FDLS_TGT_STATE_READY) + && (tport->state != FDLS_TGT_STATE_ADISC)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_NO_CONNECT for IO as tport state: %d\n", + tport->state); + sc->result = DID_NO_CONNECT << 16; done(sc); return 0; } - if (lp->state != LPORT_ST_READY || !(lp->link_up)) + atomic_inc(&fnic->in_flight); + atomic_inc(&tport->in_flight); + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; + } - atomic_inc(&fnic->in_flight); + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", + fnic->state_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } - /* - * Release host lock, use driver resource specific locks from here. - * Don't re-enable interrupts in case they were disabled prior to the - * caller disabling them. - */ - spin_unlock(lp->host->host_lock); - CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED; - CMD_FLAGS(sc) = FNIC_NO_FLAGS; + if (!tport->lun0_delay) { + lun0_delay = 1; + tport->lun0_delay++; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; + fnic_priv(sc)->flags = FNIC_NO_FLAGS; /* Get a new io_req for this SCSI IO */ io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); @@ -512,11 +605,12 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ sg_count = scsi_dma_map(sc); if (sg_count < 0) { FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, - tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc)); + mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state); mempool_free(io_req, fnic->io_req_pool); goto out; } + io_req->tport = tport; /* Determine the type of scatter/gather list we need */ io_req->sgl_cnt = sg_count; io_req->sgl_type = FNIC_SGL_CACHE_DFLT; @@ -547,43 +641,53 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ } /* - * Will acquire lock defore setting to IO initialized. + * Will acquire lock before setting to IO initialized. 
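 * In the multiqueue path that lock is the per-hwq wq_copy_lock picked
 * via blk_mq_unique_tag_to_hwq(mqtag), and while it is held the io_req
 * is also published in sw_copy_wq[hwq].io_req_table so the completion
 * handlers can cross-check it against fnic_priv(sc)->io_req.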
*/ - - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + hwq = blk_mq_unique_tag_to_hwq(mqtag); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); /* initialize rest of io_req */ io_lock_acquired = 1; io_req->port_id = rport->port_id; io_req->start_time = jiffies; - CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; - CMD_SP(sc) = (char *)io_req; - CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED; - sc->scsi_done = done; + fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + fnic_priv(sc)->io_req = io_req; + fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; + io_req->sc = sc; + + if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) { + WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n", + fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag)); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req; + io_req->tag = mqtag; /* create copy wq desc and enqueue it */ - wq = &fnic->wq_copy[0]; - ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); + wq = &fnic->hw_copy_wq[hwq]; + atomic64_inc(&fnic_stats->io_stats.ios[hwq]); + ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq); if (ret) { /* * In case another thread cancelled the request, * refetch the pointer under the lock. */ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, - tag, sc, 0, 0, 0, - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); - io_req = (struct fnic_io_req *)CMD_SP(sc); - CMD_SP(sc) = NULL; - CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; - spin_unlock_irqrestore(io_lock, flags); + mqtag, sc, 0, 0, 0, fnic_flags_and_state(sc)); + io_req = fnic_priv(sc)->io_req; + fnic_priv(sc)->io_req = NULL; + if (io_req) + fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL; + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); if (io_req) { fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); } atomic_dec(&fnic->in_flight); - /* acquire host lock before returning to SCSI */ - spin_lock(lp->host->host_lock); + atomic_dec(&tport->in_flight); return ret; } else { atomic64_inc(&fnic_stats->io_stats.active_ios); @@ -594,7 +698,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ atomic64_read(&fnic_stats->io_stats.active_ios)); /* REVISIT: Use per IO lock in the final code */ - CMD_FLAGS(sc) |= FNIC_IO_ISSUED; + fnic_priv(sc)->flags |= FNIC_IO_ISSUED; } out: cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | @@ -603,20 +707,25 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ sc->cmnd[5]); FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, - tag, sc, io_req, sg_count, cmd_trace, - (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); + mqtag, sc, io_req, sg_count, cmd_trace, + fnic_flags_and_state(sc)); /* if only we issued IO, will we have the io lock */ if (io_lock_acquired) - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); - /* acquire host lock before returning to SCSI */ - spin_lock(lp->host->host_lock); + atomic_dec(&tport->in_flight); + + if (lun0_delay) { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "LUN0 delay\n"); + mdelay(LUN0_DELAY_TIME); + } + return ret; } -DEF_SCSI_QCMD(fnic_queuecommand) /* * fnic_fcpio_fw_reset_cmpl_handler @@ -637,7 +746,7 @@ static int 
fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, atomic64_inc(&reset_stats->fw_reset_completions); /* Clean up all outstanding io requests */ - fnic_cleanup_io(fnic); + fnic_cleanup_io(fnic, SCSI_NO_TAG); atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); @@ -649,52 +758,43 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { /* Check status of reset completion */ if (!hdr_status) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "reset cmpl success\n"); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "reset cmpl success\n"); /* Ready to send flogi out */ fnic->state = FNIC_IN_ETH_MODE; } else { - FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, - "fnic fw_reset : failed %s\n", - fnic_fcpio_status_to_str(hdr_status)); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "reset failed with header status: %s\n", + fnic_fcpio_status_to_str(hdr_status)); - /* - * Unable to change to eth mode, cannot send out flogi - * Change state to fc mode, so that subsequent Flogi - * requests from libFC will cause more attempts to - * reset the firmware. Free the cached flogi - */ fnic->state = FNIC_IN_FC_MODE; atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } } else { - FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, - "Unexpected state %s while processing" - " reset cmpl\n", fnic_state_to_str(fnic->state)); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Unexpected state while processing reset completion: %s\n", + fnic_state_to_str(fnic->state)); atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } - /* Thread removing device blocks till firmware reset is complete */ - if (fnic->remove_wait) - complete(fnic->remove_wait); + if (fnic->fw_reset_done) + complete(fnic->fw_reset_done); /* * If fnic is being removed, or fw reset failed * free the flogi frame. 
Else, send it out */ - if (fnic->remove_wait || ret) { + if (ret) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); - skb_queue_purge(&fnic->tx_queue); + fnic_free_txq(&fnic->tx_queue); goto reset_cmpl_handler_end; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_flush_tx(fnic); + queue_work(fnic_event_queue, &fnic->flush_work); reset_cmpl_handler_end: fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); @@ -724,19 +824,19 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, /* Check flogi registration completion status */ if (!hdr_status) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "flog reg succeeded\n"); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "FLOGI reg succeeded\n"); fnic->state = FNIC_IN_FC_MODE; } else { FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, - "fnic flogi reg :failed %s\n", + fnic->host, fnic->fnic_num, + "fnic flogi reg failed: %s\n", fnic_fcpio_status_to_str(hdr_status)); fnic->state = FNIC_IN_ETH_MODE; ret = -1; } } else { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Unexpected fnic state %s while" " processing flogi reg completion\n", fnic_state_to_str(fnic->state)); @@ -750,7 +850,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, } spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_flush_tx(fnic); + queue_work(fnic_event_queue, &fnic->flush_work); queue_work(fnic_event_queue, &fnic->frame_work); } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); @@ -793,22 +893,23 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, u16 request_out = desc->u.ack.request_out; unsigned long flags; u64 *ox_id_tag = (u64 *)(void *)desc; + unsigned int wq_index = cq_index; /* mark the ack state */ - wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; - spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + wq = &fnic->hw_copy_wq[cq_index]; + spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags); fnic->fnic_stats.misc_stats.last_ack_time = jiffies; if (is_ack_index_in_range(wq, request_out)) { - fnic->fw_ack_index[0] = request_out; - fnic->fw_ack_recd[0] = 1; + fnic->fw_ack_index[wq_index] = request_out; + fnic->fw_ack_recd[wq_index] = 1; } else atomic64_inc( &fnic->fnic_stats.misc_stats.ack_index_out_of_range); - spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags); FNIC_TRACE(fnic_fcpio_ack_handler, - fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], + fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], ox_id_tag[4], ox_id_tag[5]); } @@ -816,12 +917,12 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, * fnic_fcpio_icmnd_cmpl_handler * Routine to handle icmnd completions */ -static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, +static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_index, struct fcpio_fw_req *desc) { u8 type; u8 hdr_status; - struct fcpio_tag tag; + struct fcpio_tag ftag; u32 id; u64 xfer_len = 0; struct fcpio_icmnd_cmpl *icmnd_cmpl; @@ -829,33 +930,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, struct scsi_cmnd *sc; struct fnic_stats *fnic_stats = &fnic->fnic_stats; unsigned long flags; - spinlock_t *io_lock; u64 cmd_trace; unsigned long start_time; unsigned long io_duration_time; + unsigned int hwq = 0; + unsigned int mqtag = 0; + unsigned int tag = 0; /* Decode the cmpl description to get the io_req id */ - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); - 
fcpio_tag_id_dec(&tag, &id); + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + fcpio_tag_id_dec(&ftag, &id); icmnd_cmpl = &desc->u.icmnd_cmpl; - if (id >= fnic->fnic_max_tag_id) { - shost_printk(KERN_ERR, fnic->lport->host, - "Tag out of range tag %x hdr status = %s\n", - id, fnic_fcpio_status_to_str(hdr_status)); + mqtag = id; + tag = blk_mq_unique_tag_to_tag(mqtag); + hwq = blk_mq_unique_tag_to_hwq(mqtag); + + if (hwq != cq_index) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", + hwq, mqtag, tag, cq_index); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hdr status: %s icmnd completion on the wrong queue\n", + fnic_fcpio_status_to_str(hdr_status)); + } + + if (tag >= fnic->fnic_max_tag_id) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", + hwq, mqtag, tag, cq_index); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hdr status: %s Out of range tag\n", + fnic_fcpio_status_to_str(hdr_status)); return; } + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - sc = scsi_host_find_tag(fnic->lport->host, id); + sc = scsi_host_find_tag(fnic->host, id); WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); - shost_printk(KERN_ERR, fnic->lport->host, + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + shost_printk(KERN_ERR, fnic->host, "icmnd_cmpl sc is null - " "hdr status = %s tag = 0x%x desc = 0x%p\n", fnic_fcpio_status_to_str(hdr_status), id, desc); FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, - fnic->lport->host->host_no, id, + fnic->host->host_no, id, ((u64)icmnd_cmpl->_resvd0[1] << 16 | (u64)icmnd_cmpl->_resvd0[0]), ((u64)hdr_status << 16 | @@ -865,15 +986,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, return; } - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + io_req = fnic_priv(sc)->io_req; + if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) { + WARN(1, "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x io_req tag mismatch\n", + __func__, __LINE__, hwq, mqtag, tag); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + return; + } + WARN_ON_ONCE(!io_req); if (!io_req) { atomic64_inc(&fnic_stats->io_stats.ioreq_null); - CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; - spin_unlock_irqrestore(io_lock, flags); - shost_printk(KERN_ERR, fnic->lport->host, + fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + shost_printk(KERN_ERR, fnic->host, "icmnd_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", fnic_fcpio_status_to_str(hdr_status), id, sc); @@ -888,19 +1014,19 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, * if SCSI-ML has already issued abort on this command, * set completion of the IO. 
The abts path will clean it up */ - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { /* * set the FNIC_IO_DONE so that this doesn't get * flagged as 'out of order' if it was not aborted */ - CMD_FLAGS(sc) |= FNIC_IO_DONE; - CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING; - spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_IO_DONE; + fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); if(FCPIO_ABORTED == hdr_status) - CMD_FLAGS(sc) |= FNIC_IO_ABORTED; + fnic_priv(sc)->flags |= FNIC_IO_ABORTED; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "icmnd_cmpl abts pending " "hdr status = %s tag = 0x%x sc = 0x%p " "scsi_status = %x residual = %d\n", @@ -912,7 +1038,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, } /* Mark the IO as complete */ - CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; icmnd_cmpl = &desc->u.icmnd_cmpl; @@ -931,6 +1057,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) atomic64_inc(&fnic_stats->misc_stats.queue_fulls); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "xfer_len: %llu", xfer_len); break; case FCPIO_TIMEOUT: /* request was timed out */ @@ -983,12 +1112,16 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, } /* Break link with the SCSI command */ - CMD_SP(sc) = NULL; - CMD_FLAGS(sc) |= FNIC_IO_DONE; + fnic_priv(sc)->io_req = NULL; + io_req->sc = NULL; + fnic_priv(sc)->flags |= FNIC_IO_DONE; + fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; + + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); if (hdr_status != FCPIO_SUCCESS) { atomic64_inc(&fnic_stats->io_stats.io_failures); - shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n", fnic_fcpio_status_to_str(hdr_status)); } @@ -1005,22 +1138,19 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, ((u64)icmnd_cmpl->_resvd0[1] << 56 | (u64)icmnd_cmpl->_resvd0[0] << 48 | jiffies_to_msecs(jiffies - start_time)), - desc, cmd_trace, - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + desc, cmd_trace, fnic_flags_and_state(sc)); if (sc->sc_data_direction == DMA_FROM_DEVICE) { - fnic->lport->host_stats.fcp_input_requests++; + fnic_stats->host_stats.fcp_input_requests++; fnic->fcp_input_bytes += xfer_len; } else if (sc->sc_data_direction == DMA_TO_DEVICE) { - fnic->lport->host_stats.fcp_output_requests++; + fnic_stats->host_stats.fcp_output_requests++; fnic->fcp_output_bytes += xfer_len; } else - fnic->lport->host_stats.fcp_control_requests++; + fnic_stats->host_stats.fcp_control_requests++; /* Call SCSI completion function to complete the IO */ - if (sc->scsi_done) - sc->scsi_done(sc); - spin_unlock_irqrestore(io_lock, flags); + scsi_done(sc); mempool_free(io_req, fnic->io_req_pool); @@ -1057,54 +1187,92 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, /* fnic_fcpio_itmf_cmpl_handler * Routine to handle itmf completions */ -static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, +static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_index, struct fcpio_fw_req *desc) { u8 type; u8 hdr_status; - struct fcpio_tag tag; + struct fcpio_tag ftag; u32 id; - struct scsi_cmnd *sc; + struct scsi_cmnd *sc = NULL; struct fnic_io_req *io_req; struct fnic_stats *fnic_stats = &fnic->fnic_stats; 
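/*
 * ITMF ids reuse the icmnd mqtag space: FNIC_TAG_ABORT and
 * FNIC_TAG_DEV_RST are flag bits OR'ed into the id, so the handler
 * masks with FNIC_TAG_MASK before splitting the unique tag into
 * (hwq, tag) and range-checking it.
 */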
struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; - spinlock_t *io_lock; unsigned long start_time; + unsigned int hwq = cq_index; + unsigned int mqtag; + unsigned int tag; + + fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); + fcpio_tag_id_dec(&ftag, &id); + + mqtag = id & FNIC_TAG_MASK; + tag = blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK); + hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK); + + if (hwq != cq_index) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", + hwq, mqtag, tag, cq_index); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hdr status: %s ITMF completion on the wrong queue\n", + fnic_fcpio_status_to_str(hdr_status)); + } + + if (tag > fnic->fnic_max_tag_id) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", + hwq, mqtag, tag, cq_index); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hdr status: %s Tag out of range\n", + fnic_fcpio_status_to_str(hdr_status)); + return; + } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", + hwq, mqtag, tag, cq_index); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hdr status: %s Tag out of range\n", + fnic_fcpio_status_to_str(hdr_status)); + return; + } - fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); - fcpio_tag_id_dec(&tag, &id); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { - shost_printk(KERN_ERR, fnic->lport->host, - "Tag out of range tag %x hdr status = %s\n", - id, fnic_fcpio_status_to_str(hdr_status)); - return; + /* If it is sg3utils allocated SC then tag_id + * is max_tag_id and SC is retrieved from io_req + */ + if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { + io_req = fnic->sw_copy_wq[hwq].io_req_table[tag]; + if (io_req) + sc = io_req->sc; + } else { + sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK); } - sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); - shost_printk(KERN_ERR, fnic->lport->host, + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + shost_printk(KERN_ERR, fnic->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", - fnic_fcpio_status_to_str(hdr_status), id); + fnic_fcpio_status_to_str(hdr_status), tag); return; } - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + + io_req = fnic_priv(sc)->io_req; WARN_ON_ONCE(!io_req); if (!io_req) { atomic64_inc(&fnic_stats->io_stats.ioreq_null); - spin_unlock_irqrestore(io_lock, flags); - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; - shost_printk(KERN_ERR, fnic->lport->host, + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; + shost_printk(KERN_ERR, fnic->host, "itmf_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", - fnic_fcpio_status_to_str(hdr_status), id, sc); + fnic_fcpio_status_to_str(hdr_status), tag, sc); return; } start_time = io_req->start_time; @@ -1112,64 +1280,69 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { /* Abort 
and terminate completion of device reset req */ /* REVISIT : Add asserts about various flags */ - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "dev reset abts cmpl recd. id %x status %s\n", - id, fnic_fcpio_status_to_str(hdr_status)); - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; - CMD_ABTS_STATUS(sc) = hdr_status; - CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n", + hwq, mqtag, tag, + fnic_fcpio_status_to_str(hdr_status)); + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; + fnic_priv(sc)->abts_status = hdr_status; + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; if (io_req->abts_done) complete(io_req->abts_done); - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } else if (id & FNIC_TAG_ABORT) { /* Completion of abort cmd */ + shost_printk(KERN_DEBUG, fnic->host, + "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n", + hwq, mqtag, tag, + fnic_fcpio_status_to_str(hdr_status)); switch (hdr_status) { case FCPIO_SUCCESS: break; case FCPIO_TIMEOUT: - if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) atomic64_inc(&abts_stats->abort_fw_timeouts); else atomic64_inc( &term_stats->terminate_fw_timeouts); break; case FCPIO_ITMF_REJECTED: - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "abort reject recd. id %d\n", (int)(id & FNIC_TAG_MASK)); break; case FCPIO_IO_NOT_FOUND: - if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) atomic64_inc(&abts_stats->abort_io_not_found); else atomic64_inc( &term_stats->terminate_io_not_found); break; default: - if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) atomic64_inc(&abts_stats->abort_failures); else atomic64_inc( &term_stats->terminate_failures); break; } - if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { + if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) { /* This is a late completion. Ignore it */ - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return; } - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; - CMD_ABTS_STATUS(sc) = hdr_status; + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; + fnic_priv(sc)->abts_status = hdr_status; /* If the status is IO not found consider it as success */ if (hdr_status == FCPIO_IO_NOT_FOUND) - CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; + fnic_priv(sc)->abts_status = FCPIO_SUCCESS; - if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "abts cmpl recd. 
id %d status %s\n", (int)(id & FNIC_TAG_MASK), fnic_fcpio_status_to_str(hdr_status)); @@ -1181,87 +1354,89 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, */ if (io_req->abts_done) { complete(io_req->abts_done); - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + shost_printk(KERN_INFO, fnic->host, + "hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n", + hwq, mqtag, tag); } else { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "abts cmpl, completing IO\n"); - CMD_SP(sc) = NULL; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n", + hwq, mqtag, + tag, fnic_fcpio_status_to_str(hdr_status)); + fnic_priv(sc)->io_req = NULL; sc->result = (DID_ERROR << 16); - - spin_unlock_irqrestore(io_lock, flags); + fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); - if (sc->scsi_done) { - FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, - sc->device->host->host_no, id, - sc, - jiffies_to_msecs(jiffies - start_time), - desc, - (((u64)hdr_status << 40) | - (u64)sc->cmnd[0] << 32 | - (u64)sc->cmnd[2] << 24 | - (u64)sc->cmnd[3] << 16 | - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), - (((u64)CMD_FLAGS(sc) << 32) | - CMD_STATE(sc))); - sc->scsi_done(sc); - atomic64_dec(&fnic_stats->io_stats.active_ios); - if (atomic64_read(&fnic->io_cmpl_skip)) - atomic64_dec(&fnic->io_cmpl_skip); - else - atomic64_inc(&fnic_stats->io_stats.io_completions); - } + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, + sc->device->host->host_no, id, + sc, + jiffies_to_msecs(jiffies - start_time), + desc, + (((u64)hdr_status << 40) | + (u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | + (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + fnic_flags_and_state(sc)); + scsi_done(sc); + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); } - } else if (id & FNIC_TAG_DEV_RST) { /* Completion of device reset */ - CMD_LR_STATUS(sc) = hdr_status; - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { - spin_unlock_irqrestore(io_lock, flags); - CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING; + shost_printk(KERN_INFO, fnic->host, + "hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n", + hwq, mqtag, + tag, fnic_fcpio_status_to_str(hdr_status)); + fnic_priv(sc)->lr_status = hdr_status; + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, sc->device->host->host_no, id, sc, jiffies_to_msecs(jiffies - start_time), - desc, 0, - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Terminate pending " - "dev reset cmpl recd. 
id %d status %s\n", - (int)(id & FNIC_TAG_MASK), - fnic_fcpio_status_to_str(hdr_status)); + desc, 0, fnic_flags_and_state(sc)); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n", + hwq, mqtag, + tag, fnic_fcpio_status_to_str(hdr_status)); return; } - if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) { + if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { /* Need to wait for terminate completion */ - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, sc->device->host->host_no, id, sc, jiffies_to_msecs(jiffies - start_time), - desc, 0, - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + desc, 0, fnic_flags_and_state(sc)); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "dev reset cmpl recd after time out. " "id %d status %s\n", (int)(id & FNIC_TAG_MASK), fnic_fcpio_status_to_str(hdr_status)); return; } - CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; - CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "dev reset cmpl recd. id %d status %s\n", - (int)(id & FNIC_TAG_MASK), - fnic_fcpio_status_to_str(hdr_status)); + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n", + hwq, mqtag, + tag, fnic_fcpio_status_to_str(hdr_status)); if (io_req->dr_done) complete(io_req->dr_done); - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } else { - shost_printk(KERN_ERR, fnic->lport->host, - "Unexpected itmf io state %s tag %x\n", - fnic_ioreq_state_to_str(CMD_STATE(sc)), id); - spin_unlock_irqrestore(io_lock, flags); + shost_printk(KERN_ERR, fnic->host, + "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n", + __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } } @@ -1288,17 +1463,19 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, break; } + cq_index -= fnic->copy_wq_base; + switch (desc->hdr.type) { case FCPIO_ACK: /* fw copied copy wq desc to its queue */ fnic_fcpio_ack_handler(fnic, cq_index, desc); break; case FCPIO_ICMND_CMPL: /* fw completed a command */ - fnic_fcpio_icmnd_cmpl_handler(fnic, desc); + fnic_fcpio_icmnd_cmpl_handler(fnic, cq_index, desc); break; case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ - fnic_fcpio_itmf_cmpl_handler(fnic, desc); + fnic_fcpio_itmf_cmpl_handler(fnic, cq_index, desc); break; case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ @@ -1311,7 +1488,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, break; default: - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "firmware completion type %d\n", desc->hdr.type); break; @@ -1324,10 +1501,8 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, * fnic_wq_copy_cmpl_handler * Routine to process wq copy */ -int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) +int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index) { - unsigned int wq_work_done = 0; - unsigned int i, cq_index; unsigned int cur_work_done; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; u64 start_jiffies = 0; @@ -1335,114 +1510,147 @@ int fnic_wq_copy_cmpl_handler(struct fnic 
*fnic, int copy_work_to_do) u64 delta_jiffies = 0; u64 delta_ms = 0; - for (i = 0; i < fnic->wq_copy_count; i++) { - cq_index = i + fnic->raw_wq_count + fnic->rq_count; - - start_jiffies = jiffies; - cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], - fnic_fcpio_cmpl_handler, - copy_work_to_do); - end_jiffies = jiffies; - - wq_work_done += cur_work_done; - delta_jiffies = end_jiffies - start_jiffies; - if (delta_jiffies > - (u64) atomic64_read(&misc_stats->max_isr_jiffies)) { - atomic64_set(&misc_stats->max_isr_jiffies, - delta_jiffies); - delta_ms = jiffies_to_msecs(delta_jiffies); - atomic64_set(&misc_stats->max_isr_time_ms, delta_ms); - atomic64_set(&misc_stats->corr_work_done, - cur_work_done); - } + start_jiffies = jiffies; + cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], + fnic_fcpio_cmpl_handler, + copy_work_to_do); + end_jiffies = jiffies; + delta_jiffies = end_jiffies - start_jiffies; + if (delta_jiffies > (u64) atomic64_read(&misc_stats->max_isr_jiffies)) { + atomic64_set(&misc_stats->max_isr_jiffies, delta_jiffies); + delta_ms = jiffies_to_msecs(delta_jiffies); + atomic64_set(&misc_stats->max_isr_time_ms, delta_ms); + atomic64_set(&misc_stats->corr_work_done, cur_work_done); } - return wq_work_done; + + return cur_work_done; } static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) { - const int tag = scsi_cmd_to_rq(sc)->tag; + struct request *const rq = scsi_cmd_to_rq(sc); struct fnic *fnic = data; struct fnic_io_req *io_req; - unsigned long flags = 0; - spinlock_t *io_lock; unsigned long start_time = 0; + unsigned long flags; struct fnic_stats *fnic_stats = &fnic->fnic_stats; + uint16_t hwq = 0; + int tag; + int mqtag; + + mqtag = blk_mq_unique_tag(rq); + hwq = blk_mq_unique_tag_to_hwq(mqtag); + tag = blk_mq_unique_tag_to_tag(mqtag); + + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + + fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; - io_lock = fnic_io_lock_tag(fnic, tag); - spin_lock_irqsave(io_lock, flags); + io_req = fnic_priv(sc)->io_req; + if (!io_req) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n", + hwq, mqtag, tag, fnic_priv(sc)->flags); + return true; + } - io_req = (struct fnic_io_req *)CMD_SP(sc); - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && - !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && + !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { /* * We will be here only when FW completes reset * without sending completions for outstanding ios. 
*/ - CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; if (io_req && io_req->dr_done) complete(io_req->dr_done); else if (io_req && io_req->abts_done) complete(io_req->abts_done); - spin_unlock_irqrestore(io_lock, flags); + + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; - } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { - spin_unlock_irqrestore(io_lock, flags); + } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } - if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); - goto cleanup_scsi_cmd; - } - CMD_SP(sc) = NULL; - - spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->io_req = NULL; + io_req->sc = NULL; + start_time = io_req->start_time; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); /* * If there is a scsi_cmnd associated with this io_req, then * free the corresponding state */ - start_time = io_req->start_time; fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); -cleanup_scsi_cmd: sc->result = DID_TRANSPORT_DISRUPTED << 16; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", - tag, sc, jiffies - start_time); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", + mqtag, tag, sc, (jiffies - start_time)); if (atomic64_read(&fnic->io_cmpl_skip)) atomic64_dec(&fnic->io_cmpl_skip); else atomic64_inc(&fnic_stats->io_stats.io_completions); - /* Complete the command to SCSI */ - if (sc->scsi_done) { - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) - shost_printk(KERN_ERR, fnic->lport->host, - "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", - tag, sc); - - FNIC_TRACE(fnic_cleanup_io, + FNIC_TRACE(fnic_cleanup_io, sc->device->host->host_no, tag, sc, jiffies_to_msecs(jiffies - start_time), - 0, ((u64)sc->cmnd[0] << 32 | - (u64)sc->cmnd[2] << 24 | - (u64)sc->cmnd[3] << 16 | - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + 0, ((u64) sc->cmnd[0] << 32 | + (u64) sc->cmnd[2] << 24 | + (u64) sc->cmnd[3] << 16 | + (u64) sc->cmnd[4] << 8 | sc->cmnd[5]), + (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)-> + state)); - sc->scsi_done(sc); - } + /* Complete the command to SCSI */ + scsi_done(sc); return true; } -static void fnic_cleanup_io(struct fnic *fnic) +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) { - scsi_host_busy_iter(fnic->lport->host, - fnic_cleanup_io_iter, fnic); + unsigned int io_count = 0; + unsigned long flags; + struct fnic_io_req *io_req = NULL; + struct scsi_cmnd *sc = NULL; + + io_count = fnic_count_all_ioreqs(fnic); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Outstanding ioreq count: %d active io count: %lld Waiting\n", + io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + scsi_host_busy_iter(fnic->host, + fnic_cleanup_io_iter, fnic); + + /* with sg3utils device reset, SC needs to be retrieved from ioreq */ + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id]; + if (io_req) { + sc = io_req->sc; + if (sc) { + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + if (io_req && io_req->dr_done) + complete(io_req->dr_done); + } + } + } + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + + while 
((io_count = fnic_count_all_ioreqs(fnic))) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Outstanding ioreq count: %d active io count: %lld Waiting\n", + io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + schedule_timeout(msecs_to_jiffies(100)); + } } void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, @@ -1453,8 +1661,8 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, struct fnic_io_req *io_req; struct scsi_cmnd *sc; unsigned long flags; - spinlock_t *io_lock; unsigned long start_time = 0; + uint16_t hwq; /* get the tag reference */ fcpio_tag_id_dec(&desc->hdr.tag, &id); @@ -1463,26 +1671,28 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, if (id >= fnic->fnic_max_tag_id) return; - sc = scsi_host_find_tag(fnic->lport->host, id); + sc = scsi_host_find_tag(fnic->host, id); if (!sc) return; - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + hwq = blk_mq_unique_tag_to_hwq(id); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); /* Get the IO context which this desc refers to */ - io_req = (struct fnic_io_req *)CMD_SP(sc); + io_req = fnic_priv(sc)->io_req; /* fnic interrupts are turned off by now */ if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto wq_copy_cleanup_scsi_cmd; } - CMD_SP(sc) = NULL; + fnic_priv(sc)->io_req = NULL; + io_req->sc = NULL; + fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL; - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); start_time = io_req->start_time; fnic_release_ioreq_buf(fnic, io_req, sc); @@ -1490,49 +1700,51 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, wq_copy_cleanup_scsi_cmd: sc->result = DID_NO_CONNECT << 16; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:" " DID_NO_CONNECT\n"); - if (sc->scsi_done) { - FNIC_TRACE(fnic_wq_copy_cleanup_handler, - sc->device->host->host_no, id, sc, - jiffies_to_msecs(jiffies - start_time), - 0, ((u64)sc->cmnd[0] << 32 | - (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + FNIC_TRACE(fnic_wq_copy_cleanup_handler, + sc->device->host->host_no, id, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64)sc->cmnd[0] << 32 | + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), + fnic_flags_and_state(sc)); - sc->scsi_done(sc); - } + scsi_done(sc); } static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, u32 task_req, u8 *fc_lun, - struct fnic_io_req *io_req) + struct fnic_io_req *io_req, + unsigned int hwq) { - struct vnic_wq_copy *wq = &fnic->wq_copy[0]; - struct Scsi_Host *host = fnic->lport->host; + struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq]; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; + struct fnic_tport_s *tport = io_req->tport; - spin_lock_irqsave(host->host_lock, flags); + spin_lock_irqsave(&fnic->fnic_lock, flags); if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { - spin_unlock_irqrestore(host->host_lock, flags); + atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); return 1; } else atomic_inc(&fnic->in_flight); - spin_unlock_irqrestore(host->host_lock, flags); + 
spin_unlock_irqrestore(&fnic->fnic_lock, flags); - spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) - free_wq_copy_descs(fnic, wq); + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + free_wq_copy_descs(fnic, wq, hwq); if (!vnic_wq_copy_desc_avail(wq)) { - spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + atomic_dec(&tport->in_flight); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic_queue_abort_io_req: failure: no descriptors\n"); atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); return 1; @@ -1547,7 +1759,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); - spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); return 0; @@ -1561,33 +1773,40 @@ struct fnic_rport_abort_io_iter_data { static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) { + struct request *const rq = scsi_cmd_to_rq(sc); struct fnic_rport_abort_io_iter_data *iter_data = data; struct fnic *fnic = iter_data->fnic; - int abt_tag = scsi_cmd_to_rq(sc)->tag; + int abt_tag = 0; struct fnic_io_req *io_req; - spinlock_t *io_lock; - unsigned long flags; struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct scsi_lun fc_lun; enum fnic_ioreq_state old_ioreq_state; + uint16_t hwq = 0; + unsigned long flags; - io_lock = fnic_io_lock_tag(fnic, abt_tag); - spin_lock_irqsave(io_lock, flags); + abt_tag = blk_mq_unique_tag(rq); + hwq = blk_mq_unique_tag_to_hwq(abt_tag); - io_req = (struct fnic_io_req *)CMD_SP(sc); + if (!sc) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq); + return true; + } + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (!io_req || io_req->port_id != iter_data->port_id) { - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && - (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", - sc); - spin_unlock_irqrestore(io_lock, flags); + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && + !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n", + hwq, abt_tag, fnic_priv(sc)->flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } @@ -1595,71 +1814,80 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) * Found IO that is still pending with firmware and * belongs to rport that went away */ - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { - spin_unlock_irqrestore(io_lock, flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } + if (io_req->abts_done) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic_rport_exch_reset: io_req->abts_done is set " - "state is %s\n", - 
fnic_ioreq_state_to_str(CMD_STATE(sc))); - } - - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { - shost_printk(KERN_ERR, fnic->lport->host, - "rport_exch_reset " - "IO not yet issued %p tag 0x%x flags " - "%x state %d\n", - sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc)); - } - old_ioreq_state = CMD_STATE(sc); - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + shost_printk(KERN_ERR, fnic->host, + "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n", + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + } + + if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { + shost_printk(KERN_ERR, fnic->host, + "rport_exch_reset IO not yet issued %p abt_tag 0x%x", + sc, abt_tag); + shost_printk(KERN_ERR, fnic->host, + "flags %x state %d\n", fnic_priv(sc)->flags, + fnic_priv(sc)->state); + } + old_ioreq_state = fnic_priv(sc)->state; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { atomic64_inc(&reset_stats->device_reset_terminates); abt_tag |= FNIC_TAG_DEV_RST; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "dev reset sc 0x%p\n", sc); } - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); - BUG_ON(io_req->abts_done); - - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc); + WARN_ON_ONCE(io_req->abts_done); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic_rport_reset_exch: Issuing abts\n"); - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - /* Now queue the abort command to firmware */ + /* Queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, abt_tag, FCPIO_ITMF_ABT_TASK_TERM, - fc_lun.scsi_lun, io_req)) { + fc_lun.scsi_lun, io_req, hwq)) { /* * Revert the cmd state back to old state, if * it hasn't changed in between. 
This cmd will get * aborted later by scsi_eh, or cleaned up during * lun reset */ - spin_lock_irqsave(io_lock, flags); - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) - CMD_STATE(sc) = old_ioreq_state; - spin_unlock_irqrestore(io_lock, flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n", + hwq, abt_tag, fnic_priv(sc)->flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + fnic_priv(sc)->state = old_ioreq_state; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } else { - spin_lock_irqsave(io_lock, flags); - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; else - CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; - spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic64_inc(&term_stats->terminates); iter_data->term_cnt++; } + return true; } -static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) +void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) { + unsigned int io_count = 0; + unsigned long flags; struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct fnic_rport_abort_io_iter_data iter_data = { .fnic = fnic, @@ -1667,54 +1895,115 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) .term_cnt = 0, }; - FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, - "fnic_rport_exch_reset called portid 0x%06x\n", - port_id); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic rport exchange reset for tport: 0x%06x\n", + port_id); if (fnic->in_remove) return; - scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, + io_count = fnic_count_ioreqs(fnic, port_id); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n", + port_id, io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + /* Bump in_flight counter to hold off fnic_fw_reset_handler. 
*/ + atomic_inc(&fnic->in_flight); + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter, &iter_data); + if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); + atomic_dec(&fnic->in_flight); + + while ((io_count = fnic_count_ioreqs(fnic, port_id))) + schedule_timeout(msecs_to_jiffies(1000)); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "rport: 0x%x remaining portid-io-count: %d ", + port_id, io_count); } void fnic_terminate_rport_io(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata; - struct fc_lport *lport; - struct fnic *fnic; + struct fnic_tport_s *tport; + struct rport_dd_data_s *rdd_data; + struct fnic_iport_s *iport = NULL; + struct fnic *fnic = NULL; if (!rport) { - printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); + pr_err("rport is NULL\n"); return; } - rdata = rport->dd_data; - if (!rdata) { - printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); - return; + rdd_data = rport->dd_data; + if (rdd_data) { + tport = rdd_data->tport; + if (!tport) { + pr_err( + "term rport io called after tport is deleted. Returning 0x%8x\n", + rport->port_id); + } else { + pr_err( + "term rport io called after tport is set 0x%8x\n", + rport->port_id); + pr_err( + "tport maybe rediscovered\n"); + + iport = (struct fnic_iport_s *) tport->iport; + fnic = iport->fnic; + fnic_rport_exch_reset(fnic, rport->port_id); + } } - lport = rdata->local_port; +} - if (!lport) { - printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); - return; - } - fnic = lport_priv(lport); - FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, "fnic_terminate_rport_io called" - " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", - rport->port_name, rport->node_name, rport, - rport->port_id); +/* + * FCP-SCSI specific handling for module unload + * + */ +void fnic_scsi_unload(struct fnic *fnic) +{ + unsigned long flags; - if (fnic->in_remove) - return; + /* + * Mark state so that the workqueue thread stops forwarding + * received frames and link events to the local port. 
ISR and + * other threads that can queue work items will also stop + * creating work items on the fnic workqueue + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) + fnic_scsi_fcpio_reset(fnic); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->in_remove = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_flush_tport_event_list(fnic); + fnic_delete_fcp_tports(fnic); +} - fnic_rport_exch_reset(fnic, rport->port_id); +void fnic_scsi_unload_cleanup(struct fnic *fnic) +{ + int hwq = 0; + + fc_remove_host(fnic->host); + scsi_remove_host(fnic->host); + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) + kfree(fnic->sw_copy_wq[hwq].io_req_table); } /* @@ -1725,11 +2014,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport) int fnic_abort_cmd(struct scsi_cmnd *sc) { struct request *const rq = scsi_cmd_to_rq(sc); - struct fc_lport *lp; + struct fnic_iport_s *iport; + struct fnic_tport_s *tport; struct fnic *fnic; struct fnic_io_req *io_req = NULL; struct fc_rport *rport; - spinlock_t *io_lock; + struct rport_dd_data_s *rdd_data; unsigned long flags; unsigned long start_time = 0; int ret = SUCCESS; @@ -1739,34 +2029,75 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) struct abort_stats *abts_stats; struct terminate_stats *term_stats; enum fnic_ioreq_state old_ioreq_state; - const int tag = rq->tag; + int mqtag; unsigned long abt_issued_time; + uint16_t hwq = 0; + DECLARE_COMPLETION_ONSTACK(tm_done); /* Wait for rport to unblock */ fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ - lp = shost_priv(sc->device->host); + fnic = *((struct fnic **) shost_priv(sc->device->host)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + iport = &fnic->iport; - fnic = lport_priv(lp); fnic_stats = &fnic->fnic_stats; abts_stats = &fnic->fnic_stats.abts_stats; term_stats = &fnic->fnic_stats.term_stats; rport = starget_to_rport(scsi_target(sc->device)); - FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, - "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", - rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); + mqtag = blk_mq_unique_tag(rq); + hwq = blk_mq_unique_tag_to_hwq(mqtag); + + fnic_priv(sc)->flags = FNIC_NO_FLAGS; + + rdd_data = rport->dd_data; + tport = rdd_data->tport; + + if (!tport) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Abort cmd called after tport delete! 
rport fcid: 0x%x", + rport->port_id); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n", + sc->device->lun, hwq, mqtag, + sc->cmnd[0], fnic_priv(sc)->flags); + ret = FAILED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x", + rport->port_id, sc->device->lun, hwq, mqtag); - CMD_FLAGS(sc) = FNIC_NO_FLAGS; + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Op: 0x%x flags: 0x%x\n", + sc->cmnd[0], + fnic_priv(sc)->flags); + + if (iport->state != FNIC_IPORT_STATE_READY) { + atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport NOT in READY state"); + ret = FAILED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } - if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + if ((tport->state != FDLS_TGT_STATE_READY) && + (tport->state != FDLS_TGT_STATE_ADISC)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport state: %d\n", tport->state); ret = FAILED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto fnic_abort_cmd_end; } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); /* * Avoid a race between SCSI issuing the abort and the device * completing the command. @@ -1777,20 +2108,20 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) * happened, the completion wont actually complete the command * and it will be considered as an aborted command * - * The CMD_SP will not be cleared except while holding io_req_lock. + * .io_req will not be cleared except while holding io_req_lock. */ - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); + ret = FAILED; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto fnic_abort_cmd_end; } io_req->abts_done = &tm_done; - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { - spin_unlock_irqrestore(io_lock, flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto wait_pending; } @@ -1810,19 +2141,20 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) else atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, - "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "CDB Opcode: 0x%02x Abort issued time: %lu msec\n", + sc->cmnd[0], abt_issued_time); /* * Command is still pending, need to abort it * If the firmware completes the command after this point, * the completion wont be done till mid-layer, since abort * has already started. */ - old_ioreq_state = CMD_STATE(sc); - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + old_ioreq_state = fnic_priv(sc)->state; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); /* * Check readiness of the remote port. 
If the path to remote @@ -1832,30 +2164,30 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (fc_remote_port_chkready(rport) == 0) task_req = FCPIO_ITMF_ABT_TASK; else { - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); task_req = FCPIO_ITMF_ABT_TASK_TERM; } /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); - if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun, - io_req)) { - spin_lock_irqsave(io_lock, flags); - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) - CMD_STATE(sc) = old_ioreq_state; - io_req = (struct fnic_io_req *)CMD_SP(sc); + if (fnic_queue_abort_io_req(fnic, mqtag, task_req, fc_lun.scsi_lun, + io_req, hwq)) { + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + fnic_priv(sc)->state = old_ioreq_state; + io_req = fnic_priv(sc)->io_req; if (io_req) io_req->abts_done = NULL; - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); ret = FAILED; goto fnic_abort_cmd_end; } if (task_req == FCPIO_ITMF_ABT_TASK) { - CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; + fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED; atomic64_inc(&fnic_stats->abts_stats.aborts); } else { - CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; + fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED; atomic64_inc(&fnic_stats->term_stats.terminates); } @@ -1871,43 +2203,43 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) fnic->config.ed_tov)); /* Check the abort status */ - spin_lock_irqsave(io_lock, flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + io_req = fnic_priv(sc)->io_req; if (!io_req) { atomic64_inc(&fnic_stats->io_stats.ioreq_null); - spin_unlock_irqrestore(io_lock, flags); - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; ret = FAILED; goto fnic_abort_cmd_end; } io_req->abts_done = NULL; /* fw did not complete abort, timed out */ - if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { - spin_unlock_irqrestore(io_lock, flags); + if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); if (task_req == FCPIO_ITMF_ABT_TASK) { atomic64_inc(&abts_stats->abort_drv_timeouts); } else { atomic64_inc(&term_stats->terminate_drv_timeouts); } - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT; ret = FAILED; goto fnic_abort_cmd_end; } /* IO out of order */ - if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { - spin_unlock_irqrestore(io_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Issuing Host reset due to out of order IO\n"); + if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Issuing host reset due to out of order IO\n"); ret = FAILED; goto fnic_abort_cmd_end; } - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; start_time = io_req->start_time; /* @@ -1915,39 +2247,40 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) * free the io_req if successful. If abort fails, * Device reset will clean the I/O. 
*/ - if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS) - CMD_SP(sc) = NULL; - else { + if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS || + (fnic_priv(sc)->abts_status == FCPIO_ABORTED)) { + fnic_priv(sc)->io_req = NULL; + io_req->sc = NULL; + } else { ret = FAILED; - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto fnic_abort_cmd_end; } - spin_unlock_irqrestore(io_lock, flags); + fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); - if (sc->scsi_done) { /* Call SCSI completion function to complete the IO */ - sc->result = (DID_ABORT << 16); - sc->scsi_done(sc); - atomic64_dec(&fnic_stats->io_stats.active_ios); - if (atomic64_read(&fnic->io_cmpl_skip)) - atomic64_dec(&fnic->io_cmpl_skip); - else - atomic64_inc(&fnic_stats->io_stats.io_completions); - } + sc->result = DID_ABORT << 16; + scsi_done(sc); + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); fnic_abort_cmd_end: - FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc, + FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, mqtag, sc, jiffies_to_msecs(jiffies - start_time), 0, ((u64)sc->cmnd[0] << 32 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + fnic_flags_and_state(sc)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Returning from abort cmd type %x %s\n", task_req, (ret == SUCCESS) ? "SUCCESS" : "FAILED"); @@ -1958,29 +2291,37 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, struct scsi_cmnd *sc, struct fnic_io_req *io_req) { - struct vnic_wq_copy *wq = &fnic->wq_copy[0]; - struct Scsi_Host *host = fnic->lport->host; + struct vnic_wq_copy *wq; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; struct scsi_lun fc_lun; int ret = 0; - unsigned long intr_flags; + unsigned long flags; + uint16_t hwq = 0; + uint32_t tag = 0; + struct fnic_tport_s *tport = io_req->tport; + + tag = io_req->tag; + hwq = blk_mq_unique_tag_to_hwq(tag); + wq = &fnic->hw_copy_wq[hwq]; - spin_lock_irqsave(host->host_lock, intr_flags); + spin_lock_irqsave(&fnic->fnic_lock, flags); if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { - spin_unlock_irqrestore(host->host_lock, intr_flags); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); return FAILED; - } else + } else { atomic_inc(&fnic->in_flight); - spin_unlock_irqrestore(host->host_lock, intr_flags); + atomic_inc(&tport->in_flight); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); - spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) - free_wq_copy_descs(fnic, wq); + if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) + free_wq_copy_descs(fnic, wq, hwq); if (!vnic_wq_copy_desc_avail(wq)) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "queue_dr_io_req failure - no descriptors\n"); atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); ret = -EAGAIN; @@ -1990,7 +2331,8 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, /* fill in the lun info */ 
int_to_scsilun(sc->device->lun, &fc_lun); - fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST, + tag |= FNIC_TAG_DEV_RST; + fnic_queue_wq_copy_desc_itmf(wq, tag, 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, fc_lun.scsi_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); @@ -2002,8 +2344,9 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); lr_io_req_end: - spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); return ret; } @@ -2017,12 +2360,13 @@ struct fnic_pending_aborts_iter_data { static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) { + struct request *const rq = scsi_cmd_to_rq(sc); struct fnic_pending_aborts_iter_data *iter_data = data; struct fnic *fnic = iter_data->fnic; struct scsi_device *lun_dev = iter_data->lun_dev; - int abt_tag = scsi_cmd_to_rq(sc)->tag; + unsigned long abt_tag = 0; + uint16_t hwq = 0; struct fnic_io_req *io_req; - spinlock_t *io_lock; unsigned long flags; struct scsi_lun fc_lun; DECLARE_COMPLETION_ONSTACK(tm_done); @@ -2031,11 +2375,13 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) if (sc == iter_data->lr_sc || sc->device != lun_dev) return true; - io_lock = fnic_io_lock_tag(fnic, abt_tag); - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + abt_tag = blk_mq_unique_tag(rq); + hwq = blk_mq_unique_tag_to_hwq(abt_tag); + + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } @@ -2043,28 +2389,27 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) * Found IO that is still pending with firmware and * belongs to the LUN that we are resetting */ - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Found IO in %s on lun\n", - fnic_ioreq_state_to_str(CMD_STATE(sc))); + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { - spin_unlock_irqrestore(io_lock, flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && - (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, - "%s dev rst not pending sc 0x%p\n", __func__, - sc); - spin_unlock_irqrestore(io_lock, flags); + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && + (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "dev rst not pending sc 0x%p\n", sc); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } if (io_req->abts_done) - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "%s: io_req->abts_done is set state is %s\n", - __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); - old_ioreq_state = CMD_STATE(sc); + __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + old_ioreq_state = fnic_priv(sc)->state; /* * Any pending IO issued prior to reset is expected to be * in abts pending state, if not we need to set @@ -2072,70 +2417,74 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) * When IO is completed, the IO will be handed over and * handled in this 
function. */ - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; BUG_ON(io_req->abts_done); - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { - abt_tag |= FNIC_TAG_DEV_RST; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, - "%s: dev rst sc 0x%p\n", __func__, sc); + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "dev rst sc 0x%p\n", sc); } - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; io_req->abts_done = &tm_done; - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, abt_tag, FCPIO_ITMF_ABT_TASK_TERM, - fc_lun.scsi_lun, io_req)) { - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + fc_lun.scsi_lun, io_req, hwq)) { + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (io_req) io_req->abts_done = NULL; - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) - CMD_STATE(sc) = old_ioreq_state; - spin_unlock_irqrestore(io_lock, flags); + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) + fnic_priv(sc)->state = old_ioreq_state; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); iter_data->ret = FAILED; + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "hwq: %d abt_tag: 0x%lx Abort could not be queued\n", + hwq, abt_tag); return false; } else { - spin_lock_irqsave(io_lock, flags); - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; - spin_unlock_irqrestore(io_lock, flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } - CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; + fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; wait_for_completion_timeout(&tm_done, msecs_to_jiffies (fnic->config.ed_tov)); /* Recheck cmd state to check if it is now aborted */ - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; return true; } io_req->abts_done = NULL; /* if abort is still pending with fw, fail */ - if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { - spin_unlock_irqrestore(io_lock, flags); - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; + if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; iter_data->ret = FAILED; return false; } - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; /* original sc used for lr is handled by dev reset code */ - if (sc != iter_data->lr_sc) - CMD_SP(sc) = NULL; - spin_unlock_irqrestore(io_lock, flags); + if (sc != iter_data->lr_sc) { + fnic_priv(sc)->io_req = NULL; + fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL; + } + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); /* original sc used for lr is handled by dev reset code */ if (sc != iter_data->lr_sc) { @@ -2147,11 +2496,10 @@ static bool 
fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) * Any IO is returned during reset, it needs to call scsi_done * to return the scsi_cmnd to upper layer. */ - if (sc->scsi_done) { - /* Set result to let upper SCSI layer retry */ - sc->result = DID_RESET << 16; - sc->scsi_done(sc); - } + /* Set result to let upper SCSI layer retry */ + sc->result = DID_RESET << 16; + scsi_done(sc); + return true; } @@ -2166,17 +2514,16 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, bool new_sc) { - int ret = SUCCESS; + int ret = 0; struct fnic_pending_aborts_iter_data iter_data = { .fnic = fnic, .lun_dev = lr_sc->device, .ret = SUCCESS, }; - if (new_sc) - iter_data.lr_sc = lr_sc; + iter_data.lr_sc = lr_sc; - scsi_host_busy_iter(fnic->lport->host, + scsi_host_busy_iter(fnic->host, fnic_pending_aborts_iter, &iter_data); if (iter_data.ret == FAILED) { ret = iter_data.ret; @@ -2186,45 +2533,14 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, /* walk again to check, if IOs are still pending in fw */ if (fnic_is_abts_pending(fnic, lr_sc)) - ret = FAILED; + ret = 1; clean_pending_aborts_end: + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "exit status: %d\n", ret); return ret; } -/* - * fnic_scsi_host_start_tag - * Allocates tagid from host's tag list - **/ -static inline int -fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc) -{ - struct request *rq = scsi_cmd_to_rq(sc); - struct request_queue *q = rq->q; - struct request *dummy; - - dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT); - if (IS_ERR(dummy)) - return SCSI_NO_TAG; - - rq->tag = dummy->tag; - sc->host_scribble = (unsigned char *)dummy; - - return dummy->tag; -} - -/* - * fnic_scsi_host_end_tag - * frees tag allocated by fnic_scsi_host_start_tag. - **/ -static inline void -fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc) -{ - struct request *dummy = (struct request *)sc->host_scribble; - - blk_mq_free_request(dummy); -} - /* * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN * fail to get aborted. 
It calls driver's eh_device_reset with a SCSI command @@ -2233,66 +2549,101 @@ fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc) int fnic_device_reset(struct scsi_cmnd *sc) { struct request *rq = scsi_cmd_to_rq(sc); - struct fc_lport *lp; struct fnic *fnic; struct fnic_io_req *io_req = NULL; struct fc_rport *rport; int status; + int count = 0; int ret = FAILED; - spinlock_t *io_lock; unsigned long flags; unsigned long start_time = 0; struct scsi_lun fc_lun; struct fnic_stats *fnic_stats; struct reset_stats *reset_stats; - int tag = rq->tag; + int mqtag = rq->tag; DECLARE_COMPLETION_ONSTACK(tm_done); - int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ bool new_sc = 0; + uint16_t hwq = 0; + struct fnic_iport_s *iport = NULL; + struct rport_dd_data_s *rdd_data; + struct fnic_tport_s *tport; + u32 old_soft_reset_count; + u32 old_link_down_cnt; + int exit_dr = 0; /* Wait for rport to unblock */ fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ - lp = shost_priv(sc->device->host); + fnic = *((struct fnic **) shost_priv(sc->device->host)); + iport = &fnic->iport; - fnic = lport_priv(lp); fnic_stats = &fnic->fnic_stats; - reset_stats = &fnic->fnic_stats.reset_stats; + reset_stats = &fnic_stats->reset_stats; atomic64_inc(&reset_stats->device_resets); rport = starget_to_rport(scsi_target(sc->device)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n", - rport->port_id, sc->device->lun, sc); - if (lp->state != LPORT_ST_READY || !(lp->link_up)) + spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n", + rport->port_id, sc->device->lun, hwq, mqtag, + fnic_priv(sc)->flags); + + rdd_data = rport->dd_data; + tport = rdd_data->tport; + if (!tport) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n", + rport->port_id, sc->device->lun); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_device_reset_end; + } + + if (iport->state != FNIC_IPORT_STATE_READY) { + atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport NOT in READY state"); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_device_reset_end; + } + + if ((tport->state != FDLS_TGT_STATE_READY) && + (tport->state != FDLS_TGT_STATE_ADISC)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport state: %d\n", tport->state); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto fnic_device_reset_end; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); /* Check if remote port up */ if (fc_remote_port_chkready(rport)) { - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); goto fnic_device_reset_end; } - CMD_FLAGS(sc) = FNIC_DEVICE_RESET; - /* Allocate tag if not present */ + fnic_priv(sc)->flags = FNIC_DEVICE_RESET; - if (unlikely(tag < 0)) { + if (unlikely(mqtag < 0)) { /* - * Really should fix the midlayer to pass in a proper - * request for ioctls... + * For device reset issued through sg3utils, we let + * only one LUN_RESET to go through and use a special + * tag equal to max_tag_id so that we don't have to allocate + * or free it. It won't interact with tags + * allocated by mid layer. 
*/ - tag = fnic_scsi_host_start_tag(fnic, sc); - if (unlikely(tag == SCSI_NO_TAG)) - goto fnic_device_reset_end; - tag_gen_flag = 1; + mutex_lock(&fnic->sgreset_mutex); + mqtag = fnic->fnic_max_tag_id; new_sc = 1; + } else { + mqtag = blk_mq_unique_tag(rq); + hwq = blk_mq_unique_tag_to_hwq(mqtag); } - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; /* * If there is a io_req attached to this command, then use it, @@ -2301,34 +2652,49 @@ int fnic_device_reset(struct scsi_cmnd *sc) if (!io_req) { io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto fnic_device_reset_end; } memset(io_req, 0, sizeof(*io_req)); io_req->port_id = rport->port_id; - CMD_SP(sc) = (char *)io_req; + io_req->tag = mqtag; + fnic_priv(sc)->io_req = io_req; + io_req->tport = tport; + io_req->sc = sc; + + if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) + WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n", + fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag)); + + fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = + io_req; } io_req->dr_done = &tm_done; - CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; - CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; - spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; + fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag); /* * issue the device reset, if enqueue failed, clean up the ioreq * and break assoc with scsi cmd */ if (fnic_queue_dr_io_req(fnic, sc, io_req)) { - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (io_req) io_req->dr_done = NULL; goto fnic_device_reset_clean; } - spin_lock_irqsave(io_lock, flags); - CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED; - spin_unlock_irqrestore(io_lock, flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + old_link_down_cnt = iport->fnic->link_down_cnt; + old_soft_reset_count = fnic->soft_reset_count; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); /* * Wait on the local completion for LUN reset. The io_req may be @@ -2337,17 +2703,42 @@ int fnic_device_reset(struct scsi_cmnd *sc) wait_for_completion_timeout(&tm_done, msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + /* + * Wake up can be due to the following reasons: + * 1) The device reset completed from target. + * 2) Device reset timed out. + * 3) A link-down/host_reset may have happened in between. + * 4) The device reset was aborted and io_req->dr_done was called. 
+ */ + + exit_dr = 0; + spin_lock_irqsave(&fnic->fnic_lock, flags); + if ((old_link_down_cnt != fnic->link_down_cnt) || + (fnic->reset_in_progress) || + (fnic->soft_reset_count != old_soft_reset_count) || + (iport->state != FNIC_IPORT_STATE_READY)) + exit_dr = 1; + + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "io_req is null tag 0x%x sc 0x%p\n", tag, sc); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc); goto fnic_device_reset_end; } + + if (exit_dr) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Host reset called for fnic. Exit device reset\n"); + io_req->dr_done = NULL; + goto fnic_device_reset_clean; + } io_req->dr_done = NULL; - status = CMD_LR_STATUS(sc); + status = fnic_priv(sc)->lr_status; /* * If lun reset not completed, bail out with failed. io_req @@ -2355,64 +2746,22 @@ int fnic_device_reset(struct scsi_cmnd *sc) */ if (status == FCPIO_INVALID_CODE) { atomic64_inc(&reset_stats->device_reset_timeouts); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Device reset timed out\n"); - CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; - spin_unlock_irqrestore(io_lock, flags); + fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; int_to_scsilun(sc->device->lun, &fc_lun); - /* - * Issue abort and terminate on device reset request. - * If q'ing of terminate fails, retry it after a delay. - */ - while (1) { - spin_lock_irqsave(io_lock, flags); - if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) { - spin_unlock_irqrestore(io_lock, flags); - break; - } - spin_unlock_irqrestore(io_lock, flags); - if (fnic_queue_abort_io_req(fnic, - tag | FNIC_TAG_DEV_RST, - FCPIO_ITMF_ABT_TASK_TERM, - fc_lun.scsi_lun, io_req)) { - wait_for_completion_timeout(&tm_done, - msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); - } else { - spin_lock_irqsave(io_lock, flags); - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; - io_req->abts_done = &tm_done; - spin_unlock_irqrestore(io_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Abort and terminate issued on Device reset " - "tag 0x%x sc 0x%p\n", tag, sc); - break; - } - } - while (1) { - spin_lock_irqsave(io_lock, flags); - if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { - spin_unlock_irqrestore(io_lock, flags); - wait_for_completion_timeout(&tm_done, - msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); - break; - } else { - io_req = (struct fnic_io_req *)CMD_SP(sc); - io_req->abts_done = NULL; - goto fnic_device_reset_clean; - } - } + goto fnic_device_reset_clean; } else { - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } /* Completed, but not successful, clean up the io_req, return fail */ if (status != FCPIO_SUCCESS) { - spin_lock_irqsave(io_lock, flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, + fnic->host, fnic->fnic_num, "Device reset completed - failed\n"); - io_req = (struct fnic_io_req *)CMD_SP(sc); + io_req = fnic_priv(sc)->io_req; goto fnic_device_reset_clean; } @@ -2424,26 +2773,28 @@ int fnic_device_reset(struct scsi_cmnd *sc) * succeeds */ if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { - spin_lock_irqsave(io_lock, flags); - io_req = 
(struct fnic_io_req *)CMD_SP(sc); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Device reset failed" - " since could not abort all IOs\n"); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Device reset failed: Cannot abort all IOs\n"); goto fnic_device_reset_clean; } /* Clean lun reset command */ - spin_lock_irqsave(io_lock, flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + io_req = fnic_priv(sc)->io_req; if (io_req) /* Completed, and successful */ ret = SUCCESS; fnic_device_reset_clean: - if (io_req) - CMD_SP(sc) = NULL; + if (io_req) { + fnic_priv(sc)->io_req = NULL; + io_req->sc = NULL; + fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL; + } - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); if (io_req) { start_time = io_req->start_time; @@ -2451,19 +2802,40 @@ int fnic_device_reset(struct scsi_cmnd *sc) mempool_free(io_req, fnic->io_req_pool); } + /* + * If link-event is seen while LUN reset is issued we need + * to complete the LUN reset here + */ + if (!new_sc) { + sc->result = DID_RESET << 16; + scsi_done(sc); + } + fnic_device_reset_end: FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc, jiffies_to_msecs(jiffies - start_time), 0, ((u64)sc->cmnd[0] << 32 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); + fnic_flags_and_state(sc)); + + if (new_sc) { + fnic->sgreset_sc = NULL; + mutex_unlock(&fnic->sgreset_mutex); + } - /* free tag if it is allocated */ - if (unlikely(tag_gen_flag)) - fnic_scsi_host_end_tag(fnic, sc); + while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) { + if (count >= 2) { + ret = FAILED; + break; + } + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Cannot clean up all IOs for the LUN\n"); + schedule_timeout(msecs_to_jiffies(1000)); + count++; + } - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Returning from device reset %s\n", (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); @@ -2474,68 +2846,78 @@ int fnic_device_reset(struct scsi_cmnd *sc) return ret; } -/* Clean up all IOs, clean up libFC local port */ -int fnic_reset(struct Scsi_Host *shost) +static void fnic_post_flogo_linkflap(struct fnic *fnic) +{ + unsigned long flags; + + fnic_fdls_link_status_change(fnic, 0); + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->link_status) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fdls_link_status_change(fnic, 1); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/* Logout from all the targets and simulate link flap */ +void fnic_reset(struct Scsi_Host *shost) { - struct fc_lport *lp; struct fnic *fnic; - int ret = 0; struct reset_stats *reset_stats; - lp = shost_priv(shost); - fnic = lport_priv(lp); + fnic = *((struct fnic **) shost_priv(shost)); reset_stats = &fnic->fnic_stats.reset_stats; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "fnic_reset called\n"); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Issuing fnic reset\n"); atomic64_inc(&reset_stats->fnic_resets); + fnic_post_flogo_linkflap(fnic); - /* - * Reset local port, this will clean up libFC exchanges, - * reset remote port sessions, and if link is up, begin flogi - */ - ret = fc_lport_reset(lp); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Returning from fnic reset"); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "Returning from fnic reset %s\n", - (ret == 0) ? - "SUCCESS" : "FAILED"); + atomic64_inc(&reset_stats->fnic_reset_completions); +} - if (ret == 0) - atomic64_inc(&reset_stats->fnic_reset_completions); - else - atomic64_inc(&reset_stats->fnic_reset_failures); +int fnic_issue_fc_host_lip(struct Scsi_Host *shost) +{ + int ret = 0; + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FC host lip issued"); + ret = fnic_host_reset(shost); return ret; } -/* - * SCSI Error handling calls driver's eh_host_reset if all prior - * error handling levels return FAILED. If host reset completes - * successfully, and if link is up, then Fabric login begins. - * - * Host Reset is the highest level of error recovery. If this fails, then - * host is offlined by SCSI. - * - */ -int fnic_host_reset(struct scsi_cmnd *sc) +int fnic_host_reset(struct Scsi_Host *shost) { - int ret; + int ret = SUCCESS; unsigned long wait_host_tmo; - struct Scsi_Host *shost = sc->device->host; - struct fc_lport *lp = shost_priv(shost); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (!fnic->internal_reset_inprogress) { - fnic->internal_reset_inprogress = true; + if (fnic->reset_in_progress == NOT_IN_PROGRESS) { + fnic->reset_in_progress = IN_PROGRESS; } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "host reset in progress skipping another host reset\n"); - return SUCCESS; + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(10000)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->reset_in_progress == IN_PROGRESS) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, + "Firmware reset in progress. 
Skipping another host reset\n"); + return SUCCESS; + } + fnic->reset_in_progress = IN_PROGRESS; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); @@ -2544,150 +2926,49 @@ int fnic_host_reset(struct scsi_cmnd *sc) * scsi-ml tries to send a TUR to every device if host reset is * successful, so before returning to scsi, fabric should be up */ - ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED; - if (ret == SUCCESS) { + fnic_reset(shost); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); + fnic->soft_reset_count++; + + /* wait till the link is up */ + if (fnic->link_status) { wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; ret = FAILED; while (time_before(jiffies, wait_host_tmo)) { - if ((lp->state == LPORT_ST_READY) && - (lp->link_up)) { + if (iport->state != FNIC_IPORT_STATE_READY + && fnic->link_status) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + ssleep(1); + spin_lock_irqsave(&fnic->fnic_lock, flags); + } else { ret = SUCCESS; break; } - ssleep(1); } } - - spin_lock_irqsave(&fnic->fnic_lock, flags); - fnic->internal_reset_inprogress = false; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return ret; -} - -/* - * This fxn is called from libFC when host is removed - */ -void fnic_scsi_abort_io(struct fc_lport *lp) -{ - int err = 0; - unsigned long flags; - enum fnic_state old_state; - struct fnic *fnic = lport_priv(lp); - DECLARE_COMPLETION_ONSTACK(remove_wait); - - /* Issue firmware reset for fnic, wait for reset to complete */ -retry_fw_reset: - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) && - fnic->link_events) { - /* fw reset is in progress, poll for its completion */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - schedule_timeout(msecs_to_jiffies(100)); - goto retry_fw_reset; - } - - fnic->remove_wait = &remove_wait; - old_state = fnic->state; - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); spin_unlock_irqrestore(&fnic->fnic_lock, flags); - err = fnic_fw_reset_handler(fnic); - if (err) { - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) - fnic->state = old_state; - fnic->remove_wait = NULL; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - - /* Wait for firmware reset to complete */ - wait_for_completion_timeout(&remove_wait, - msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT)); - - spin_lock_irqsave(&fnic->fnic_lock, flags); - fnic->remove_wait = NULL; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "fnic_scsi_abort_io %s\n", - (fnic->state == FNIC_IN_ETH_MODE) ? 
- "SUCCESS" : "FAILED"); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - -} - -/* - * This fxn called from libFC to clean up driver IO state on link down - */ -void fnic_scsi_cleanup(struct fc_lport *lp) -{ - unsigned long flags; - enum fnic_state old_state; - struct fnic *fnic = lport_priv(lp); - - /* issue fw reset */ -retry_fw_reset: - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { - /* fw reset is in progress, poll for its completion */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - schedule_timeout(msecs_to_jiffies(100)); - goto retry_fw_reset; - } - old_state = fnic->state; - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - if (fnic_fw_reset_handler(fnic)) { - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) - fnic->state = old_state; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - } - -} - -void fnic_empty_scsi_cleanup(struct fc_lport *lp) -{ -} - -void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) -{ - struct fnic *fnic = lport_priv(lp); - - /* Non-zero sid, nothing to do */ - if (sid) - goto call_fc_exch_mgr_reset; - - if (did) { - fnic_rport_exch_reset(fnic, did); - goto call_fc_exch_mgr_reset; - } - - /* - * sid = 0, did = 0 - * link down or device being removed - */ - if (!fnic->in_remove) - fnic_scsi_cleanup(lp); - else - fnic_scsi_abort_io(lp); - - /* call libFC exch mgr reset to reset its exchanges */ -call_fc_exch_mgr_reset: - fc_exch_mgr_reset(lp, sid, did); - + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "host reset return status: %d\n", ret); + return ret; } static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) { + struct request *const rq = scsi_cmd_to_rq(sc); struct fnic_pending_aborts_iter_data *iter_data = data; struct fnic *fnic = iter_data->fnic; int cmd_state; struct fnic_io_req *io_req; - spinlock_t *io_lock; unsigned long flags; + uint16_t hwq = 0; + int tag; + + tag = blk_mq_unique_tag(rq); + hwq = blk_mq_unique_tag_to_hwq(tag); /* * ignore this lun reset cmd or cmds that do not belong to @@ -2698,12 +2979,11 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) if (iter_data->lun_dev && sc->device != iter_data->lun_dev) return true; - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - io_req = (struct fnic_io_req *)CMD_SP(sc); + io_req = fnic_priv(sc)->io_req; if (!io_req) { - spin_unlock_irqrestore(io_lock, flags); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } @@ -2711,11 +2991,12 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) * Found IO that is still pending with firmware and * belongs to the LUN that we are resetting */ - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, - "Found IO in %s on lun\n", - fnic_ioreq_state_to_str(CMD_STATE(sc))); - cmd_state = CMD_STATE(sc); - spin_unlock_irqrestore(io_lock, flags); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "hwq: %d tag: 0x%x Found IO in state: %s on lun\n", + hwq, tag, + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); + cmd_state = fnic_priv(sc)->state; + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); if (cmd_state == FNIC_IOREQ_ABTS_PENDING) iter_data->ret = 1; @@ -2743,8 +3024,81 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) } /* walk again to check, if IOs are 
still pending in fw */ - scsi_host_busy_iter(fnic->lport->host, + scsi_host_busy_iter(fnic->host, fnic_abts_pending_iter, &iter_data); return iter_data.ret; } + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. If host reset completes + * successfully, and if link is up, then Fabric login begins. + * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. + * + */ +int fnic_eh_host_reset_handler(struct scsi_cmnd *sc) +{ + int ret = 0; + struct Scsi_Host *shost = sc->device->host; + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "SCSI error handling: fnic host reset"); + + ret = fnic_host_reset(shost); + return ret; +} + + +void fnic_scsi_fcpio_reset(struct fnic *fnic) +{ + unsigned long flags; + enum fnic_state old_state; + struct fnic_iport_s *iport = &fnic->iport; + DECLARE_COMPLETION_ONSTACK(fw_reset_done); + int time_remain; + + /* issue fw reset */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { + /* fw reset is in progress, poll for its completion */ + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic is in unexpected state: %d for fw_reset\n", + fnic->state); + return; + } + + old_state = fnic->state; + fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; + + fnic_update_mac_locked(fnic, iport->hwmac); + fnic->fw_reset_done = &fw_reset_done; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Issuing fw reset\n"); + if (fnic_fw_reset_handler(fnic)) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) + fnic->state = old_state; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + } else { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Waiting for fw completion\n"); + time_remain = wait_for_completion_timeout(&fw_reset_done, + msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT)); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Woken up after fw completion timeout\n"); + if (time_remain == 0) { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FW reset completion timed out after %d ms)\n", + FNIC_FW_RESET_TIMEOUT); + } + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); + } + fnic->fw_reset_done = NULL; +} diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h index 086f729f3c46c..c478a29b5ccc7 100644 --- a/drivers/scsi/fnic/fnic_stats.h +++ b/drivers/scsi/fnic/fnic_stats.h @@ -16,6 +16,8 @@ */ #ifndef _FNIC_STATS_H_ #define _FNIC_STATS_H_ +#define FNIC_MQ_MAX_QUEUES 64 +#include struct stats_timestamps { struct timespec64 last_reset_time; @@ -40,6 +42,7 @@ struct io_path_stats { atomic64_t io_btw_10000_to_30000_msec; atomic64_t io_greater_than_30000_msec; atomic64_t current_max_io_time; + atomic64_t ios[FNIC_MQ_MAX_QUEUES]; }; struct abort_stats { @@ -75,6 +78,7 @@ struct reset_stats { atomic64_t fw_resets; atomic64_t fw_reset_completions; atomic64_t fw_reset_failures; + atomic64_t fw_reset_timeouts; atomic64_t fnic_resets; atomic64_t fnic_reset_completions; atomic64_t fnic_reset_failures; @@ -114,9 +118,51 @@ struct misc_stats { atomic64_t no_icmnd_itmf_cmpls; atomic64_t check_condition; atomic64_t queue_fulls; - atomic64_t rport_not_ready; + atomic64_t tport_not_ready; + atomic64_t iport_not_ready; atomic64_t frame_errors; atomic64_t 
current_port_speed; + atomic64_t intx_dummy; + atomic64_t port_speed_in_mbps; +}; + +struct fnic_iport_stats { + atomic64_t num_linkdn; + atomic64_t num_linkup; + atomic64_t link_failure_count; + atomic64_t num_rscns; + atomic64_t rscn_redisc; + atomic64_t rscn_not_redisc; + atomic64_t frame_err; + atomic64_t num_rnid; + atomic64_t fabric_flogi_sent; + atomic64_t fabric_flogi_ls_accepts; + atomic64_t fabric_flogi_ls_rejects; + atomic64_t fabric_flogi_misc_rejects; + atomic64_t fabric_plogi_sent; + atomic64_t fabric_plogi_ls_accepts; + atomic64_t fabric_plogi_ls_rejects; + atomic64_t fabric_plogi_misc_rejects; + atomic64_t fabric_scr_sent; + atomic64_t fabric_scr_ls_accepts; + atomic64_t fabric_scr_ls_rejects; + atomic64_t fabric_scr_misc_rejects; + atomic64_t fabric_logo_sent; + atomic64_t tport_alive; + atomic64_t tport_plogi_sent; + atomic64_t tport_plogi_ls_accepts; + atomic64_t tport_plogi_ls_rejects; + atomic64_t tport_plogi_misc_rejects; + atomic64_t tport_prli_sent; + atomic64_t tport_prli_ls_accepts; + atomic64_t tport_prli_ls_rejects; + atomic64_t tport_prli_misc_rejects; + atomic64_t tport_adisc_sent; + atomic64_t tport_adisc_ls_accepts; + atomic64_t tport_adisc_ls_rejects; + atomic64_t tport_logo_sent; + atomic64_t unsupported_frames_ls_rejects; + atomic64_t unsupported_frames_dropped; }; struct fnic_stats { @@ -127,6 +173,7 @@ struct fnic_stats { struct reset_stats reset_stats; struct fw_stats fw_stats; struct vlan_stats vlan_stats; + struct fc_host_statistics host_stats; struct misc_stats misc_stats; }; @@ -138,6 +185,5 @@ struct stats_debug_info { }; int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); -void fnic_stats_debugfs_init(struct fnic *); -void fnic_stats_debugfs_remove(struct fnic *); +const char *fnic_role_to_str(unsigned int role); #endif /* _FNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 4a7536bb0ab38..1cf1400393347 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "fnic_io.h" #include "fnic.h" @@ -43,6 +44,17 @@ int fnic_fc_tracing_enabled = 1; int fnic_fc_trace_cleared = 1; static DEFINE_SPINLOCK(fnic_fc_trace_lock); +static const char * const fnic_role_str[] = { + [FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator", +}; + +const char *fnic_role_to_str(unsigned int role) +{ + if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role]) + return "Unknown"; + + return fnic_role_str[role]; +} /* * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information @@ -218,6 +230,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug, int len = 0; int buf_size = debug->buf_size; struct timespec64 val1, val2; + int i = 0; ktime_get_real_ts64(&val1); len = scnprintf(debug->debug_buffer + len, buf_size - len, @@ -280,6 +293,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug, (u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec), (u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec)); + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\tIO Queues and cumulative IOs\n" + "------------------------------------------\n"); + + for (i = 0; i < FNIC_MQ_MAX_QUEUES; i++) { + len += scnprintf(debug->debug_buffer + len, buf_size - len, + "Q:%d -> %lld\n", i, (u64)atomic64_read(&stats->io_stats.ios[i])); + } + len += scnprintf(debug->debug_buffer + len, buf_size - len, "\nCurrent Max IO time : %lld\n", 
(u64)atomic64_read(&stats->io_stats.current_max_io_time)); @@ -426,7 +449,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug, "Number of Check Conditions encountered: %lld\n" "Number of QUEUE Fulls: %lld\n" "Number of rport not ready: %lld\n" - "Number of receive frame errors: %lld\n", + "Number of receive frame errors: %lld\n" + "Port speed (in Mbps): %lld\n", (u64)stats->misc_stats.last_isr_time, (s64)val1.tv_sec, val1.tv_nsec, (u64)stats->misc_stats.last_ack_time, @@ -449,18 +473,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug, (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), (u64)atomic64_read(&stats->misc_stats.check_condition), (u64)atomic64_read(&stats->misc_stats.queue_fulls), - (u64)atomic64_read(&stats->misc_stats.rport_not_ready), - (u64)atomic64_read(&stats->misc_stats.frame_errors)); - - len += scnprintf(debug->debug_buffer + len, buf_size - len, - "Firmware reported port speed: %llu\n", - (u64)atomic64_read( - &stats->misc_stats.current_port_speed)); + (u64)atomic64_read(&stats->misc_stats.tport_not_ready), + (u64)atomic64_read(&stats->misc_stats.frame_errors), + (u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps)); return len; } +int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic) +{ + struct fnic_iport_s *iport = &fnic->iport; + int buf_size = info->buf_size; + int len = info->buffer_len; + struct fnic_tport_s *tport, *next; + unsigned long flags; + + len += snprintf(info->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\t Debug Info\n" + "------------------------------------------\n"); + len += snprintf(info->debug_buffer + len, buf_size - len, + "fnic Name:%s number:%d Role:%s State:%s\n", + fnic->name, fnic->fnic_num, + fnic_role_to_str(fnic->role), + fnic_state_to_str(fnic->state)); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n", + iport->state, iport->flags, iport->vlan_id, iport->fcid); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "usefip:%d fip_state:%d fip_flogi_retry:%d\n", + iport->usefip, iport->fip.state, iport->fip.flogi_retry); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fpma %02x:%02x:%02x:%02x:%02x:%02x", + iport->fpma[5], iport->fpma[4], iport->fpma[3], + iport->fpma[2], iport->fpma[1], iport->fpma[0]); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n", + iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3], + iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n", + iport->fabric.state, iport->fabric.flags, + iport->fabric.retry_counter, iport->e_d_tov, + iport->r_a_tov); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + len += snprintf(info->debug_buffer + len, buf_size - len, + "tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n", + tport->fcid, tport->state, tport->flags, + atomic_read(&tport->in_flight), + tport->retry_counter); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return len; +} + /* * fnic_trace_buf_init - Initialize fnic trace buffer logging facility * @@ -479,7 +553,7 @@ int fnic_trace_buf_init(void) fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/ FNIC_ENTRY_SIZE_BYTES; - fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE); + 
fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE); if (!fnic_trace_buf_p) { printk(KERN_ERR PFX "Failed to allocate memory " "for fnic_trace_buf_p\n"); @@ -488,8 +562,7 @@ int fnic_trace_buf_init(void) } fnic_trace_entries.page_offset = - vmalloc(array_size(fnic_max_trace_entries, - sizeof(unsigned long))); + vcalloc(fnic_max_trace_entries, sizeof(unsigned long)); if (!fnic_trace_entries.page_offset) { printk(KERN_ERR PFX "Failed to allocate memory for" " page_offset\n"); @@ -500,8 +573,6 @@ int fnic_trace_buf_init(void) err = -ENOMEM; goto err_fnic_trace_buf_init; } - memset((void *)fnic_trace_entries.page_offset, 0, - (fnic_max_trace_entries * sizeof(unsigned long))); fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0; fnic_buf_head = fnic_trace_buf_p; @@ -562,8 +633,7 @@ int fnic_fc_trace_init(void) fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ FC_TRC_SIZE_BYTES; fnic_fc_ctlr_trace_buf_p = - (unsigned long)vmalloc(array_size(PAGE_SIZE, - fnic_fc_trace_max_pages)); + (unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE); if (!fnic_fc_ctlr_trace_buf_p) { pr_err("fnic: Failed to allocate memory for " "FC Control Trace Buf\n"); @@ -571,13 +641,9 @@ int fnic_fc_trace_init(void) goto err_fnic_fc_ctlr_trace_buf_init; } - memset((void *)fnic_fc_ctlr_trace_buf_p, 0, - fnic_fc_trace_max_pages * PAGE_SIZE); - /* Allocate memory for page offset */ fc_trace_entries.page_offset = - vmalloc(array_size(fc_trace_max_entries, - sizeof(unsigned long))); + vcalloc(fc_trace_max_entries, sizeof(unsigned long)); if (!fc_trace_entries.page_offset) { pr_err("fnic:Failed to allocate memory for page_offset\n"); if (fnic_fc_ctlr_trace_buf_p) { @@ -588,8 +654,6 @@ int fnic_fc_trace_init(void) err = -ENOMEM; goto err_fnic_fc_ctlr_trace_buf_init; } - memset((void *)fc_trace_entries.page_offset, 0, - (fc_trace_max_entries * sizeof(unsigned long))); fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; @@ -691,7 +755,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, */ if (frame_type == FNIC_FC_RECV) { eth_fcoe_hdr_len = sizeof(struct ethhdr) + - sizeof(struct fcoe_hdr); + sizeof(struct fcoe_hdr); memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); /* Copy the rest of data frame */ memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 5988c300cc82e..f95f5d692bccd 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -155,6 +155,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; } + pr_info("res_type_wq: %d res_type_rq: %d res_type_cq: %d res_type_intr_ctrl: %d\n", + vdev->res[RES_TYPE_WQ].count, vdev->res[RES_TYPE_RQ].count, + vdev->res[RES_TYPE_CQ].count, vdev->res[RES_TYPE_INTR_CTRL].count); + return 0; } diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h index e343e1d0f8013..b7fccd2cd1508 100644 --- a/drivers/scsi/fnic/vnic_scsi.h +++ b/drivers/scsi/fnic/vnic_scsi.h @@ -38,7 +38,7 @@ #define VNIC_FNIC_RATOV_MAX 255000 #define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256 -#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112 +#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2048 #define VNIC_FNIC_FLOGI_RETRIES_MIN 0 #define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff @@ -67,7 +67,7 @@ #define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255 #define VNIC_FNIC_LUNS_PER_TARGET_MIN 1 -#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024 +#define 
VNIC_FNIC_LUNS_PER_TARGET_MAX 4096 /* Device-specific region: scsi configuration */ struct vnic_fc_config { @@ -91,10 +91,19 @@ struct vnic_fc_config { u16 ra_tov; u16 intr_timer; u8 intr_timer_type; + u8 intr_mode; + u8 lun_queue_depth; + u8 io_timeout_retry; + u16 wq_copy_count; }; #define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */ #define VFCF_PERBI 0x2 /* persistent binding info available */ #define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */ +#define VFCF_FC_INITIATOR 0x20 /* FC Initiator Mode */ +#define VFCF_FC_TARGET 0x40 /* FC Target Mode */ +#define VFCF_FC_NVME_INITIATOR 0x80 /* FC-NVMe Initiator Mode */ +#define VFCF_FC_NVME_TARGET 0x100 /* FC-NVMe Target Mode */ + #endif /* _VNIC_SCSI_H_ */ diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4959c26d3b71b..8a5cd8c51e0b2 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4489,6 +4489,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) if (!count) return IRQ_NONE; + if (count > evt->length) { + dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n", + count, evt->length); + return IRQ_NONE; + } + evt->count = count; evt->flags |= DWC3_EVENT_PENDING; diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 3904333fa6c38..4b9e3d9fa926a 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -55,8 +55,6 @@ static int mdsc_show(struct seq_file *s, void *p) struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; struct rb_node *rp; - int pathlen = 0; - u64 pathbase; char *path; mutex_lock(&mdsc->mutex); @@ -81,8 +79,8 @@ static int mdsc_show(struct seq_file *s, void *p) if (req->r_inode) { seq_printf(s, " #%llx", ceph_ino(req->r_inode)); } else if (req->r_dentry) { - path = ceph_mdsc_build_path(req->r_dentry, &pathlen, - &pathbase, 0); + struct ceph_path_info path_info; + path = ceph_mdsc_build_path(req->r_dentry, &path_info, 0); if (IS_ERR(path)) path = NULL; spin_lock(&req->r_dentry->d_lock); @@ -91,7 +89,7 @@ static int mdsc_show(struct seq_file *s, void *p) req->r_dentry, path ? path : ""); spin_unlock(&req->r_dentry->d_lock); - ceph_mdsc_free_path(path, pathlen); + ceph_mdsc_free_path_info(&path_info); } else if (req->r_path1) { seq_printf(s, " #%llx/%s", req->r_ino1.ino, req->r_path1); @@ -100,8 +98,8 @@ static int mdsc_show(struct seq_file *s, void *p) } if (req->r_old_dentry) { - path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen, - &pathbase, 0); + struct ceph_path_info path_info; + path = ceph_mdsc_build_path(req->r_old_dentry, &path_info, 0); if (IS_ERR(path)) path = NULL; spin_lock(&req->r_old_dentry->d_lock); @@ -111,7 +109,7 @@ static int mdsc_show(struct seq_file *s, void *p) req->r_old_dentry, path ? 
path : ""); spin_unlock(&req->r_old_dentry->d_lock); - ceph_mdsc_free_path(path, pathlen); + ceph_mdsc_free_path_info(&path_info); } else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) { if (req->r_ino2.ino) seq_printf(s, " #%llx/%s", req->r_ino2.ino, diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index e7b61aacd7424..a9af046fcbff7 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1224,10 +1224,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc, /* If op failed, mark everyone involved for errors */ if (result) { - int pathlen = 0; - u64 base = 0; - char *path = ceph_mdsc_build_path(dentry, &pathlen, - &base, 0); + struct ceph_path_info path_info = {0}; + char *path = ceph_mdsc_build_path(dentry, &path_info, 0); /* mark error on parent + clear complete */ mapping_set_error(req->r_parent->i_mapping, result); @@ -1241,8 +1239,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc, mapping_set_error(req->r_old_inode->i_mapping, result); pr_warn("async unlink failure path=(%llx)%s result=%d!\n", - base, IS_ERR(path) ? "<>" : path, result); - ceph_mdsc_free_path(path, pathlen); + path_info.vino.ino, IS_ERR(path) ? "<>" : path, result); + ceph_mdsc_free_path_info(&path_info); } out: iput(req->r_old_inode); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8bec680bed46b..2e455815864e4 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -576,14 +576,12 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc, mapping_set_error(req->r_parent->i_mapping, result); if (result) { - int pathlen = 0; - u64 base = 0; - char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen, - &base, 0); + struct ceph_path_info path_info = {0}; + char *path = ceph_mdsc_build_path(req->r_dentry, &path_info, 0); pr_warn("async create failure path=(%llx)%s result=%d!\n", - base, IS_ERR(path) ? "<>" : path, result); - ceph_mdsc_free_path(path, pathlen); + path_info.vino.ino, IS_ERR(path) ? "<>" : path, result); + ceph_mdsc_free_path_info(&path_info); ceph_dir_clear_complete(req->r_parent); if (!d_unhashed(dentry)) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 34cfeb0fba9e8..0eb1b1912e1cc 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -55,6 +55,52 @@ static int ceph_set_ino_cb(struct inode *inode, void *data) return 0; } +/* + * Check if the parent inode matches the vino from directory reply info + */ +static inline bool ceph_vino_matches_parent(struct inode *parent, + struct ceph_vino vino) +{ + return ceph_ino(parent) == vino.ino && ceph_snap(parent) == vino.snap; +} + +/* + * Validate that the directory inode referenced by @req->r_parent matches the + * inode number and snapshot id contained in the reply's directory record. If + * they do not match – which can theoretically happen if the parent dentry was + * moved between the time the request was issued and the reply arrived – fall + * back to looking up the correct inode in the inode cache. + * + * A reference is *always* returned. Callers that receive a different inode + * than the original @parent are responsible for dropping the extra reference + * once the reply has been processed. + */ +static struct inode *ceph_get_reply_dir(struct super_block *sb, + struct inode *parent, + struct ceph_mds_reply_info_parsed *rinfo) +{ + struct ceph_vino vino; + + if (unlikely(!rinfo->diri.in)) + return parent; /* nothing to compare against */ + + /* If we didn't have a cached parent inode to begin with, just bail out. 
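[Editor's sketch of the caller contract the comment above spells out; the real caller is ceph_fill_trace further down. The helper always returns a usable reference, and only a reference that differs from the original parent needs an explicit iput():]

struct inode *dir;

dir = ceph_get_reply_dir(sb, req->r_parent, rinfo);
if (IS_ERR(dir))
        return PTR_ERR(dir);

/* ... fill in directory information using dir ... */

if (dir && dir != req->r_parent)
        iput(dir);      /* drop the extra reference the helper took */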
*/ + if (!parent) + return NULL; + + vino.ino = le64_to_cpu(rinfo->diri.in->ino); + vino.snap = le64_to_cpu(rinfo->diri.in->snapid); + + if (likely(ceph_vino_matches_parent(parent, vino))) + return parent; /* matches – use the original reference */ + + /* Mismatch – this should be rare. Emit a WARN and obtain the correct inode. */ + WARN_ONCE(1, "ceph: reply dir mismatch (parent valid %llx.%llx reply %llx.%llx)\n", + ceph_ino(parent), ceph_snap(parent), vino.ino, vino.snap); + + return ceph_get_inode(sb, vino, NULL); +} + /** * ceph_new_inode - allocate a new inode in advance of an expected create * @dir: parent directory for new inode @@ -1489,6 +1535,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) struct inode *in = NULL; struct ceph_vino tvino, dvino; struct ceph_fs_client *fsc = ceph_sb_to_client(sb); + struct inode *parent_dir = NULL; int err = 0; dout("fill_trace %p is_dentry %d is_target %d\n", req, @@ -1502,10 +1549,18 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) } if (rinfo->head->is_dentry) { - struct inode *dir = req->r_parent; + /* + * r_parent may be stale, in cases when R_PARENT_LOCKED is not set, + * so we need to get the correct inode + */ + parent_dir = ceph_get_reply_dir(sb, req->r_parent, rinfo); + if (unlikely(IS_ERR(parent_dir))) { + err = PTR_ERR(parent_dir); + goto done; + } - if (dir) { - err = ceph_fill_inode(dir, NULL, &rinfo->diri, + if (parent_dir) { + err = ceph_fill_inode(parent_dir, NULL, &rinfo->diri, rinfo->dirfrag, session, -1, &req->r_caps_reservation); if (err < 0) @@ -1514,14 +1569,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) WARN_ON_ONCE(1); } - if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME && + if (parent_dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME && test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) && !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { bool is_nokey = false; struct qstr dname; struct dentry *dn, *parent; struct fscrypt_str oname = FSTR_INIT(NULL, 0); - struct ceph_fname fname = { .dir = dir, + struct ceph_fname fname = { .dir = parent_dir, .name = rinfo->dname, .ctext = rinfo->altname, .name_len = rinfo->dname_len, @@ -1530,10 +1585,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) BUG_ON(!rinfo->head->is_target); BUG_ON(req->r_dentry); - parent = d_find_any_alias(dir); + parent = d_find_any_alias(parent_dir); BUG_ON(!parent); - err = ceph_fname_alloc_buffer(dir, &oname); + err = ceph_fname_alloc_buffer(parent_dir, &oname); if (err < 0) { dput(parent); goto done; @@ -1542,7 +1597,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey); if (err < 0) { dput(parent); - ceph_fname_free_buffer(dir, &oname); + ceph_fname_free_buffer(parent_dir, &oname); goto done; } dname.name = oname.name; @@ -1550,6 +1605,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) dname.hash = full_name_hash(parent, dname.name, dname.len); tvino.ino = le64_to_cpu(rinfo->targeti.in->ino); tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid); + retry_lookup: dn = d_lookup(parent, &dname); dout("d_lookup on parent=%p name=%.*s got %p\n", @@ -1561,7 +1617,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) dname.len, dname.name, dn); if (!dn) { dput(parent); - ceph_fname_free_buffer(dir, &oname); + ceph_fname_free_buffer(parent_dir, &oname); err = -ENOMEM; goto done; } @@ -1576,12 +1632,12 @@ int 
ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) ceph_snap(d_inode(dn)) != tvino.snap)) { dout(" dn %p points to wrong inode %p\n", dn, d_inode(dn)); - ceph_dir_clear_ordered(dir); + ceph_dir_clear_ordered(parent_dir); d_delete(dn); dput(dn); goto retry_lookup; } - ceph_fname_free_buffer(dir, &oname); + ceph_fname_free_buffer(parent_dir, &oname); req->r_dentry = dn; dput(parent); @@ -1763,6 +1819,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) &dvino, ptvino); } done: + /* Drop extra ref from ceph_get_reply_dir() if it returned a new inode */ + if (unlikely(!IS_ERR_OR_NULL(parent_dir) && parent_dir != req->r_parent)) + iput(parent_dir); dout("fill_trace done err=%d\n", err); return err; } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 92bcf1cd8c16e..7350dd73242e6 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2631,8 +2631,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen) /** * ceph_mdsc_build_path - build a path string to a given dentry * @dentry: dentry to which path should be built - * @plen: returned length of string - * @pbase: returned base inode number + * @path_info: output path, length, base ino+snap, and freepath ownership flag * @for_wire: is this path going to be sent to the MDS? * * Build a string that represents the path to the dentry. This is mostly called @@ -2649,7 +2648,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen) * Encode hidden .snap dirs as a double /, i.e. * foo/.snap/bar -> foo//bar */ -char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase, +char *ceph_mdsc_build_path(struct dentry *dentry, struct ceph_path_info *path_info, int for_wire) { struct dentry *cur; @@ -2761,16 +2760,28 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase, goto retry; } - *pbase = base; - *plen = PATH_MAX - 1 - pos; + /* Initialize the output structure */ + memset(path_info, 0, sizeof(*path_info)); + + path_info->vino.ino = base; + path_info->pathlen = PATH_MAX - 1 - pos; + path_info->path = path + pos; + path_info->freepath = true; + + /* Set snap from dentry if available */ + if (d_inode(dentry)) + path_info->vino.snap = ceph_snap(d_inode(dentry)); + else + path_info->vino.snap = CEPH_NOSNAP; + dout("build_path on %p %d built %llx '%.*s'\n", - dentry, d_count(dentry), base, *plen, path + pos); + dentry, d_count(dentry), base, PATH_MAX - 1 - pos, path + pos); return path + pos; } static int build_dentry_path(struct dentry *dentry, struct inode *dir, - const char **ppath, int *ppathlen, u64 *pino, - bool *pfreepath, bool parent_locked) + struct ceph_path_info *path_info, + bool parent_locked) { char *path; @@ -2779,40 +2790,46 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir, dir = d_inode_rcu(dentry->d_parent); if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP && !IS_ENCRYPTED(dir)) { - *pino = ceph_ino(dir); + path_info->vino.ino = ceph_ino(dir); + path_info->vino.snap = ceph_snap(dir); rcu_read_unlock(); - *ppath = dentry->d_name.name; - *ppathlen = dentry->d_name.len; + path_info->path = dentry->d_name.name; + path_info->pathlen = dentry->d_name.len; + path_info->freepath = false; return 0; } rcu_read_unlock(); - path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); + path = ceph_mdsc_build_path(dentry, path_info, 1); if (IS_ERR(path)) return PTR_ERR(path); - *ppath = path; - *pfreepath = true; + /* + * ceph_mdsc_build_path already fills path_info, including 
snap handling. + */ return 0; } -static int build_inode_path(struct inode *inode, - const char **ppath, int *ppathlen, u64 *pino, - bool *pfreepath) +static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info) { struct dentry *dentry; char *path; if (ceph_snap(inode) == CEPH_NOSNAP) { - *pino = ceph_ino(inode); - *ppathlen = 0; + path_info->vino.ino = ceph_ino(inode); + path_info->vino.snap = ceph_snap(inode); + path_info->pathlen = 0; + path_info->freepath = false; return 0; } dentry = d_find_alias(inode); - path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); + path = ceph_mdsc_build_path(dentry, path_info, 1); dput(dentry); if (IS_ERR(path)) return PTR_ERR(path); - *ppath = path; - *pfreepath = true; + /* + * ceph_mdsc_build_path already fills path_info, including snap from dentry. + * Override with inode's snap since that's what this function is for. + */ + path_info->vino.snap = ceph_snap(inode); return 0; } @@ -2822,25 +2839,30 @@ static int build_inode_path(struct inode *inode, */ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, struct inode *rdiri, const char *rpath, - u64 rino, const char **ppath, int *pathlen, - u64 *ino, bool *freepath, bool parent_locked) + u64 rino, struct ceph_path_info *path_info, + bool parent_locked) { int r = 0; + /* Initialize the output structure */ + memset(path_info, 0, sizeof(*path_info)); + if (rinode) { - r = build_inode_path(rinode, ppath, pathlen, ino, freepath); + r = build_inode_path(rinode, path_info); dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), ceph_snap(rinode)); } else if (rdentry) { - r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, - freepath, parent_locked); - dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, - *ppath); + r = build_dentry_path(rdentry, rdiri, path_info, parent_locked); + dout(" dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino, + path_info->pathlen, path_info->path); } else if (rpath || rino) { - *ino = rino; - *ppath = rpath; - *pathlen = rpath ? strlen(rpath) : 0; - dout(" path %.*s\n", *pathlen, rpath); + path_info->vino.ino = rino; + path_info->vino.snap = CEPH_NOSNAP; + path_info->path = rpath; + path_info->pathlen = rpath ? strlen(rpath) : 0; + path_info->freepath = false; + + dout(" path %.*s\n", path_info->pathlen, rpath); } return r; @@ -2893,28 +2915,49 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, struct ceph_mds_client *mdsc = session->s_mdsc; struct ceph_msg *msg; struct ceph_mds_request_head_old *head; - const char *path1 = NULL; - const char *path2 = NULL; - u64 ino1 = 0, ino2 = 0; - int pathlen1 = 0, pathlen2 = 0; - bool freepath1 = false, freepath2 = false; + struct ceph_path_info path_info1 = {0}; + struct ceph_path_info path_info2 = {0}; struct dentry *old_dentry = NULL; int len; u16 releases; void *p, *end; int ret; bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME); + bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags); ret = set_request_path_attr(req->r_inode, req->r_dentry, req->r_parent, req->r_path1, req->r_ino1.ino, - &path1, &pathlen1, &ino1, &freepath1, - test_bit(CEPH_MDS_R_PARENT_LOCKED, - &req->r_req_flags)); + &path_info1, parent_locked); if (ret < 0) { msg = ERR_PTR(ret); goto out; } + /* + * When the parent directory's i_rwsem is *not* locked, req->r_parent may + * have become stale (e.g. after a concurrent rename) between the time the + * dentry was looked up and now. 
If we detect that the stored r_parent + * does not match the inode number we just encoded for the request, switch + * to the correct inode so that the MDS receives a valid parent reference. + */ + if (!parent_locked && req->r_parent && path_info1.vino.ino && + ceph_ino(req->r_parent) != path_info1.vino.ino) { + struct inode *old_parent = req->r_parent; + struct inode *correct_dir = ceph_get_inode(mdsc->fsc->sb, path_info1.vino, NULL); + if (!IS_ERR(correct_dir)) { + WARN_ONCE(1, "ceph: r_parent mismatch (had %llx wanted %llx) - updating\n", + ceph_ino(old_parent), path_info1.vino.ino); + /* + * Transfer CEPH_CAP_PIN from the old parent to the new one. + * The pin was taken earlier in ceph_mdsc_submit_request(). + */ + ceph_put_cap_refs(ceph_inode(old_parent), CEPH_CAP_PIN); + iput(old_parent); + req->r_parent = correct_dir; + ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); + } + } + /* If r_old_dentry is set, then assume that its parent is locked */ if (req->r_old_dentry && !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED)) @@ -2922,7 +2965,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, ret = set_request_path_attr(NULL, old_dentry, req->r_old_dentry_dir, req->r_path2, req->r_ino2.ino, - &path2, &pathlen2, &ino2, &freepath2, true); + &path_info2, true); if (ret < 0) { msg = ERR_PTR(ret); goto out_free1; @@ -2939,7 +2982,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, /* filepaths */ len += 2 * (1 + sizeof(u32) + sizeof(u64)); - len += pathlen1 + pathlen2; + len += path_info1.pathlen + path_info2.pathlen; /* cap releases */ len += sizeof(struct ceph_mds_request_release) * @@ -2947,9 +2990,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, !!req->r_old_inode_drop + !!req->r_old_dentry_drop); if (req->r_dentry_drop) - len += pathlen1; + len += path_info1.pathlen; if (req->r_old_dentry_drop) - len += pathlen2; + len += path_info2.pathlen; /* MClientRequest tail */ @@ -3008,8 +3051,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, head->ino = cpu_to_le64(req->r_deleg_ino); head->args = req->r_args; - ceph_encode_filepath(&p, end, ino1, path1); - ceph_encode_filepath(&p, end, ino2, path2); + ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path); + ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path); /* make note of release offset, in case we need to replay */ req->r_request_release_offset = p - msg->front.iov_base; @@ -3072,11 +3115,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, msg->hdr.data_off = cpu_to_le16(0); out_free2: - if (freepath2) - ceph_mdsc_free_path((char *)path2, pathlen2); + ceph_mdsc_free_path_info(&path_info2); out_free1: - if (freepath1) - ceph_mdsc_free_path((char *)path1, pathlen1); + ceph_mdsc_free_path_info(&path_info1); out: return msg; out_err: @@ -4323,24 +4364,20 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) struct ceph_pagelist *pagelist = recon_state->pagelist; struct dentry *dentry; struct ceph_cap *cap; - char *path; - int pathlen = 0, err; - u64 pathbase; + struct ceph_path_info path_info = {0}; + int err; u64 snap_follows; dentry = d_find_primary(inode); if (dentry) { /* set pathbase to parent dir when msg_version >= 2 */ - path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, - recon_state->msg_version >= 2); + char *path = ceph_mdsc_build_path(dentry, &path_info, + recon_state->msg_version >= 2); 
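[Editor's sketch of the ceph_path_info ownership contract this refactor introduces (use_path is a hypothetical consumer): the builder fills path, pathlen, and vino and sets freepath only when the caller owns the buffer, so ceph_mdsc_free_path_info() is always safe to call, including on a zero-initialized struct after an error.]

struct ceph_path_info info = {0};
char *path = ceph_mdsc_build_path(dentry, &info, 1);

if (IS_ERR(path))
        return PTR_ERR(path);           /* info untouched, nothing to free */

use_path(info.path, info.pathlen, info.vino.ino);  /* hypothetical consumer */

ceph_mdsc_free_path_info(&info);        /* no-op when freepath is false */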
dput(dentry); if (IS_ERR(path)) { err = PTR_ERR(path); goto out_err; } - } else { - path = NULL; - pathbase = 0; } spin_lock(&ci->i_ceph_lock); @@ -4373,7 +4410,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); rec.v2.issued = cpu_to_le32(cap->issued); rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); - rec.v2.pathbase = cpu_to_le64(pathbase); + rec.v2.pathbase = cpu_to_le64(path_info.vino.ino); rec.v2.flock_len = (__force __le32) ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1); } else { @@ -4384,7 +4421,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime); ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime); rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); - rec.v1.pathbase = cpu_to_le64(pathbase); + rec.v1.pathbase = cpu_to_le64(path_info.vino.ino); } if (list_empty(&ci->i_cap_snaps)) { @@ -4446,7 +4483,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) sizeof(struct ceph_filelock); rec.v2.flock_len = cpu_to_le32(struct_len); - struct_len += sizeof(u32) + pathlen + sizeof(rec.v2); + struct_len += sizeof(u32) + path_info.pathlen + sizeof(rec.v2); if (struct_v >= 2) struct_len += sizeof(u64); /* snap_follows */ @@ -4470,7 +4507,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) ceph_pagelist_encode_8(pagelist, 1); ceph_pagelist_encode_32(pagelist, struct_len); } - ceph_pagelist_encode_string(pagelist, path, pathlen); + ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen); ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); ceph_locks_to_pagelist(flocks, pagelist, num_fcntl_locks, num_flock_locks); @@ -4481,17 +4518,17 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) } else { err = ceph_pagelist_reserve(pagelist, sizeof(u64) + sizeof(u32) + - pathlen + sizeof(rec.v1)); + path_info.pathlen + sizeof(rec.v1)); if (err) goto out_err; ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); - ceph_pagelist_encode_string(pagelist, path, pathlen); + ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen); ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); } out_err: - ceph_mdsc_free_path(path, pathlen); + ceph_mdsc_free_path_info(&path_info); if (!err) recon_state->nr_caps++; return err; diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index b45ce3fa87907..a2dd405df02f9 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -577,13 +577,23 @@ extern int ceph_iterate_session_caps(struct ceph_mds_session *session, void *arg); extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc); -static inline void ceph_mdsc_free_path(char *path, int len) +/* + * Structure to group path-related output parameters for build_*_path functions + */ +struct ceph_path_info { + const char *path; + int pathlen; + struct ceph_vino vino; + bool freepath; +}; + +static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info) { - if (!IS_ERR_OR_NULL(path)) - __putname(path - (PATH_MAX - 1 - len)); + if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path)) + __putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen)); } -extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, +extern char *ceph_mdsc_build_path(struct dentry *dentry, struct ceph_path_info *path_info, int for_wire); extern void __ceph_mdsc_drop_dentry_lease(struct dentry 
*dentry); diff --git a/fs/namespace.c b/fs/namespace.c index a9fbb35e084fa..c810d71fa61be 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2281,6 +2281,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) return attach_recursive_mnt(mnt, p, mp, false); } +static int may_change_propagation(const struct mount *m) +{ + struct mnt_namespace *ns = m->mnt_ns; + + // it must be mounted in some namespace + if (IS_ERR_OR_NULL(ns)) // is_mounted() + return -EINVAL; + // and the caller must be admin in userns of that namespace + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + return -EPERM; + return 0; +} + /* * Sanity check the flags to change_mnt_propagation. */ @@ -2317,6 +2330,10 @@ static int do_change_type(struct path *path, int ms_flags) return -EINVAL; namespace_lock(); + err = may_change_propagation(mnt); + if (err) + goto out_unlock; + if (type == MS_SHARED) { err = invent_group_ids(mnt, recurse); if (err) diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 21a887f3a36fa..3de730feb4375 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -5256,7 +5256,8 @@ void cifs_oplock_break(struct work_struct *work) struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = d_inode(cfile->dentry); - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct super_block *sb = inode->i_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_tcon *tcon; struct TCP_Server_Info *server; @@ -5266,6 +5267,12 @@ void cifs_oplock_break(struct work_struct *work) __u64 persistent_fid, volatile_fid; __u16 net_fid; + /* + * Hold a reference to the superblock to prevent it and its inodes from + * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put() + * may release the last reference to the sb and trigger inode eviction. 
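[Editor's sketch of the pin/unpin bracket being added here; get_sb_for_work is hypothetical scaffolding. The work item holds an active superblock reference for its whole lifetime so the final cifsFileInfo put cannot evict the inode underneath it:]

static void deferred_oplock_work(struct work_struct *work)  /* illustrative */
{
        struct super_block *sb = get_sb_for_work(work);     /* hypothetical */

        cifs_sb_active(sb);     /* pin: sb and its inodes stay alive */

        /* ... safely dereference the inode and cinode here ... */

        cifs_sb_deactive(sb);   /* unpin: may now drop the last reference */
}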
+ */ + cifs_sb_active(sb); wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); @@ -5338,6 +5345,7 @@ void cifs_oplock_break(struct work_struct *work) cifs_put_tlink(tlink); out: cifs_done_oplock_break(cinode); + cifs_sb_deactive(sb); } /* diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7ec2fece55e2d..2bdc4e008c410 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4157,7 +4157,17 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, return 0; } -bool dev_nit_active(struct net_device *dev); +bool dev_nit_active_rcu(const struct net_device *dev); +static inline bool dev_nit_active(const struct net_device *dev) +{ + bool ret; + + rcu_read_lock(); + ret = dev_nit_active_rcu(dev); + rcu_read_unlock(); + return ret; +} + void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); static inline void __dev_put(struct net_device *dev) diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 555688672e673..872a9beb83faa 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -185,6 +185,9 @@ struct net { #if IS_ENABLED(CONFIG_SMC) struct netns_smc smc; #endif + + RH_KABI_EXTEND(struct list_head ptype_all) + RH_KABI_EXTEND(struct list_head ptype_specific) } __randomize_layout; #include diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 702f15b1ebe43..75dd8d92f3d73 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -300,8 +300,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct) /* use after obtaining a reference count */ static inline bool nf_ct_should_gc(const struct nf_conn *ct) { - return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) && - !nf_ct_is_dying(ct); + if (!nf_ct_is_confirmed(ct)) + return false; + + /* load ct->timeout after is_confirmed() test. + * Pairs with __nf_conntrack_confirm() which: + * 1. Increases ct->timeout value + * 2. Inserts ct into rcu hlist + * 3. Sets the confirmed bit + * 4. 
Unlocks the hlist lock + */ + smp_acquire__after_ctrl_dep(); + + return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct); } #define NF_CT_DAY (86400 * HZ) diff --git a/include/net/tcp.h b/include/net/tcp.h index 34754f55a29ea..98f1cbe9372d2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -40,6 +40,7 @@ #include #include #include +#include RH_KABI_HIDE_INCLUDE() #include #include @@ -637,6 +638,19 @@ void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); void tcp_sack_compress_send_ack(struct sock *sk); +static inline void tcp_cleanup_skb(struct sk_buff *skb) +{ + skb_dst_drop(skb); + secpath_reset(skb); +} + +static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb) +{ + DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); + DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb)); + __skb_queue_tail(&sk->sk_receive_queue, skb); +} + /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); static inline void tcp_clear_xmit_timers(struct sock *sk) diff --git a/io_uring/futex.c b/io_uring/futex.c index 914848f46beb2..90ff427f0294e 100644 --- a/io_uring/futex.c +++ b/io_uring/futex.c @@ -337,6 +337,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) goto done_unlock; } + req->flags |= REQ_F_ASYNC_DATA; req->async_data = ifd; ifd->q = futex_q_init; ifd->q.bitset = iof->futex_mask; @@ -359,6 +360,8 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) if (ret < 0) req_set_fail(req); io_req_set_res(req, ret, 0); + req->async_data = NULL; + req->flags &= ~REQ_F_ASYNC_DATA; kfree(ifd); return IOU_OK; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index e6eea6e98e743..cceaa68a904c5 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1913,6 +1913,26 @@ static int find_lowest_rq(struct task_struct *task) return -1; } +static struct task_struct *pick_next_pushable_task(struct rq *rq) +{ + struct task_struct *p; + + if (!has_pushable_tasks(rq)) + return NULL; + + p = plist_first_entry(&rq->rt.pushable_tasks, + struct task_struct, pushable_tasks); + + BUG_ON(rq->cpu != task_cpu(p)); + BUG_ON(task_current(rq, p)); + BUG_ON(p->nr_cpus_allowed <= 1); + + BUG_ON(!task_on_rq_queued(p)); + BUG_ON(!rt_task(p)); + + return p; +} + /* Will lock the rq it finds */ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) { @@ -1943,18 +1963,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) /* * We had to unlock the run queue. In * the mean time, task could have - * migrated already or had its affinity changed. - * Also make sure that it wasn't scheduled on its rq. + * migrated already or had its affinity changed, + * therefore check if the task is still at the + * head of the pushable tasks list. * It is possible the task was scheduled, set * "migrate_disabled" and then got preempted, so we must * check the task migration disable flag here too. 
*/ - if (unlikely(task_rq(task) != rq || + if (unlikely(is_migration_disabled(task) || !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) || - task_on_cpu(rq, task) || - !rt_task(task) || - is_migration_disabled(task) || - !task_on_rq_queued(task))) { + task != pick_next_pushable_task(rq))) { double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; @@ -1974,26 +1992,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) return lowest_rq; } -static struct task_struct *pick_next_pushable_task(struct rq *rq) -{ - struct task_struct *p; - - if (!has_pushable_tasks(rq)) - return NULL; - - p = plist_first_entry(&rq->rt.pushable_tasks, - struct task_struct, pushable_tasks); - - BUG_ON(rq->cpu != task_cpu(p)); - BUG_ON(task_current(rq, p)); - BUG_ON(p->nr_cpus_allowed <= 1); - - BUG_ON(!task_on_rq_queued(p)); - BUG_ON(!rt_task(p)); - - return p; -} - /* * If the current CPU has more than one RT task, see if the non * running task can migrate over to a CPU that is running a task diff --git a/net/core/dev.c b/net/core/dev.c index d16786a809da6..23ff24e4d7b41 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -164,7 +164,6 @@ static DEFINE_SPINLOCK(ptype_lock); struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; -struct list_head ptype_all __read_mostly; /* Taps */ static int netif_rx_internal(struct sk_buff *skb); static int call_netdevice_notifiers_extack(unsigned long val, @@ -569,10 +568,18 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev) static inline struct list_head *ptype_head(const struct packet_type *pt) { - if (pt->type == htons(ETH_P_ALL)) - return pt->dev ? &pt->dev->ptype_all : &ptype_all; - else - return pt->dev ? &pt->dev->ptype_specific : + if (pt->type == htons(ETH_P_ALL)) { + if (!pt->af_packet_net && !pt->dev) + return NULL; + + return pt->dev ? &pt->dev->ptype_all : + &pt->af_packet_net->ptype_all; + } + + if (pt->dev) + return &pt->dev->ptype_specific; + + return pt->af_packet_net ? &pt->af_packet_net->ptype_specific : &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; } @@ -593,6 +600,9 @@ void dev_add_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); + if (WARN_ON_ONCE(!head)) + return; + spin_lock(&ptype_lock); list_add_rcu(&pt->list, head); spin_unlock(&ptype_lock); @@ -617,6 +627,9 @@ void __dev_remove_pack(struct packet_type *pt) struct list_head *head = ptype_head(pt); struct packet_type *pt1; + if (!head) + return; + spin_lock(&ptype_lock); list_for_each_entry(pt1, head, list) { @@ -2301,15 +2314,21 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) } /** - * dev_nit_active - return true if any network interface taps are in use + * dev_nit_active_rcu - return true if any network interface taps are in use + * + * The caller must hold the RCU lock * * @dev: network device to check for the presence of taps */ -bool dev_nit_active(struct net_device *dev) +bool dev_nit_active_rcu(const struct net_device *dev) { - return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); + /* Callers may hold either RCU or RCU BH lock */ + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + + return !list_empty(&dev_net(dev)->ptype_all) || + !list_empty(&dev->ptype_all); } -EXPORT_SYMBOL_GPL(dev_nit_active); +EXPORT_SYMBOL_GPL(dev_nit_active_rcu); /* * Support routine. 
Sends outgoing frames to any network @@ -2321,9 +2340,10 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) struct packet_type *ptype; struct sk_buff *skb2 = NULL; struct packet_type *pt_prev = NULL; - struct list_head *ptype_list = &ptype_all; + struct list_head *ptype_list; rcu_read_lock(); + ptype_list = &dev_net_rcu(dev)->ptype_all; again: list_for_each_entry_rcu(ptype, ptype_list, list) { if (READ_ONCE(ptype->ignore_outgoing)) @@ -2367,7 +2387,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) pt_prev = ptype; } - if (ptype_list == &ptype_all) { + if (ptype_list != &dev->ptype_all) { ptype_list = &dev->ptype_all; goto again; } @@ -3581,7 +3601,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev, unsigned int len; int rc; - if (dev_nit_active(dev)) + if (dev_nit_active_rcu(dev)) dev_queue_xmit_nit(skb, dev); len = skb->len; @@ -5445,7 +5465,8 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, if (pfmemalloc) goto skip_taps; - list_for_each_entry_rcu(ptype, &ptype_all, list) { + list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all, + list) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; @@ -5557,6 +5578,14 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, &ptype_base[ntohs(type) & PTYPE_HASH_MASK]); + + /* orig_dev and skb->dev could belong to different netns; + * Even in such case we need to traverse only the list + * coming from skb->dev, as the ptype owner (packet socket) + * will use dev_net(skb->dev) to do namespace filtering. + */ + deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, + &dev_net_rcu(skb->dev)->ptype_specific); } deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, @@ -11921,7 +11950,6 @@ static int __init net_dev_init(void) if (netdev_kobject_init()) goto out; - INIT_LIST_HEAD(&ptype_all); for (i = 0; i < PTYPE_HASH_SIZE; i++) INIT_LIST_HEAD(&ptype_base[i]); diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index f6aa2f227416a..6d29e309c5f86 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -175,12 +175,18 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos) } } - list_for_each_entry_rcu(pt, &ptype_all, list) { + list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) { if (i == pos) return pt; ++i; } + list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_specific, list) { + if (i == pos) + return pt; + ++i; + } + for (t = 0; t < PTYPE_HASH_SIZE; t++) { list_for_each_entry_rcu(pt, &ptype_base[t], list) { if (i == pos) @@ -200,6 +206,7 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct net *net = seq_file_net(seq); struct net_device *dev; struct packet_type *pt; struct list_head *nxt; @@ -223,14 +230,22 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) } } - nxt = ptype_all.next; - goto ptype_all; + nxt = net->ptype_all.next; + goto net_ptype_all; } - if (pt->type == htons(ETH_P_ALL)) { -ptype_all: - if (nxt != &ptype_all) + if (pt->af_packet_net) { +net_ptype_all: + if (nxt != &net->ptype_all && nxt != &net->ptype_specific) goto found; + + if (nxt == &net->ptype_all) { + /* continue with ->ptype_specific if it's not empty */ + nxt = net->ptype_specific.next; + if (nxt != &net->ptype_specific) + goto found; + } + hash = 0; nxt = ptype_base[0].next; } else diff --git 
a/net/core/net_namespace.c b/net/core/net_namespace.c index 9fe7ae5e18633..16459b72baadf 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -311,6 +311,9 @@ EXPORT_SYMBOL_GPL(get_net_ns_by_id); static __net_init void preinit_net(struct net *net) { ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt"); + + INIT_LIST_HEAD(&net->ptype_all); + INIT_LIST_HEAD(&net->ptype_specific); } /* diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index b6b1bff6f24a6..8392d304a72eb 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -204,6 +204,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) return -EINVAL; + if (skb_is_gso(skb)) + skb_gso_reset(skb); + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); pskb_pull(skb, ETH_HLEN); skb_reset_network_header(skb); @@ -298,6 +301,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu) if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) return -EINVAL; + if (skb_is_gso(skb)) + skb_gso_reset(skb); + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); pskb_pull(skb, ETH_HLEN); skb_reset_network_header(skb); @@ -416,7 +422,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, skb_dst_update_pmtu_no_confirm(skb, mtu); - if (!reply || skb->pkt_type == PACKET_HOST) + if (!reply) return 0; if (skb->protocol == htons(ETH_P_IP)) diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index b9bb69721e801..b71bcd999fb82 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -194,7 +194,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) if (!skb) return; - skb_dst_drop(skb); + tcp_cleanup_skb(skb); /* segs_in has been initialized to 1 in tcp_create_openreq_child(). * Hence, reset segs_in to 0 before calling tcp_segs_in() * to avoid double counting. Also, tcp_segs_in() expects @@ -211,7 +211,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); tp->syn_data_acked = 1; /* u64_stats_update_begin(&tp->syncp) not needed here, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 86c59cc3543d9..f6b4953ebeedb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4809,7 +4809,7 @@ static void tcp_ofo_queue(struct sock *sk) tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; if (!eaten) - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); else kfree_skb_partial(skb, fragstolen);
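
The tcp_cleanup_skb()/tcp_add_receive_queue() pair introduced in include/net/tcp.h above encodes the invariant these tcp_input.c, tcp_fastopen.c, and tcp_ipv4.c hunks rely on: an skb must shed both its dst and its secpath before it is linked into sk_receive_queue, so a queued skb can no longer pin route or xfrm state (see "tcp: drop secpath at the same time as we currently drop dst", RHEL-82136, in the changelog below). A self-contained userspace model of that contract follows; the struct layouts are stand-ins, not kernel definitions.

#include <assert.h>
#include <stddef.h>

struct dst     { int refcnt; };
struct secpath { int refcnt; };

struct skb {
        struct dst *dst;
        struct secpath *sp;
        struct skb *next;
};

struct sock { struct skb *rx_head; };

/* Models tcp_cleanup_skb(): drop both references, not just the dst. */
static void model_cleanup_skb(struct skb *skb)
{
        skb->dst = NULL;        /* models skb_dst_drop()  */
        skb->sp  = NULL;        /* models secpath_reset() */
}

/* Models tcp_add_receive_queue(): the assert plays the role of
 * DEBUG_NET_WARN_ON_ONCE() - queued skbs must already be clean. */
static void model_add_receive_queue(struct sock *sk, struct skb *skb)
{
        assert(skb->dst == NULL && skb->sp == NULL);
        skb->next = sk->rx_head;
        sk->rx_head = skb;
}

int main(void)
{
        struct dst d = { 1 };
        struct secpath p = { 1 };
        struct skb skb = { &d, &p, NULL };
        struct sock sk = { NULL };

        model_cleanup_skb(&skb);          /* skipping this trips the assert */
        model_add_receive_queue(&sk, &skb);
        return 0;
}

Dropping only the dst, as the replaced skb_dst_drop() calls did, would leave the secpath reference alive for as long as the skb sits in the receive queue, which is the lifetime problem this backport addresses.
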
@@ -5000,7 +5000,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, skb, fragstolen)) ? 1 : 0; tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); skb_set_owner_r(skb, sk); } return eaten; @@ -5083,7 +5083,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); return; } - skb_dst_drop(skb); + tcp_cleanup_skb(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); reason = SKB_DROP_REASON_NOT_SPECIFIED; @@ -5994,7 +5994,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ - skb_dst_drop(skb); + tcp_cleanup_skb(skb); __skb_pull(skb, tcp_header_len); eaten = tcp_queue_rcv(sk, skb, &fragstolen); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f88f84af31bcc..e7ab65350478d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1773,7 +1773,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, */ skb_condense(skb); - skb_dst_drop(skb); + tcp_cleanup_skb(skb); if (unlikely(tcp_checksum_complete(skb))) { bh_unlock_sock(sk); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index ef4e6088595c3..323ffb519d48e 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -803,8 +803,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) } else { im->mca_crcount = idev->mc_qrv; } - in6_dev_put(pmc->idev); ip6_mc_clear_src(pmc); + ip6_mc_clear_src(pmc); + in6_dev_put(pmc->idev); kfree_rcu(pmc, rcu); } }
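
The nf_conntrack_core.c hunks below, together with the nf_ct_should_gc() change in nf_conntrack.h earlier, establish a publish/observe protocol: the writer makes ct->timeout absolute and inserts the entry into the hash before setting IPS_CONFIRMED, and readers may trust ct->timeout only after observing the confirmed bit. Here is a compact userspace model of that pairing; it uses C11 release/acquire atomics as a simplification in place of the kernel's smp_mb__before_atomic()/smp_acquire__after_ctrl_dep() primitives, and the types are stand-ins.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        unsigned long timeout;   /* plain data, published by the flag below */
        atomic_bool confirmed;   /* models IPS_CONFIRMED */
};

/* Writer side, modeling __nf_conntrack_confirm(): the timeout is
 * finalized (and the hash insertion happens) before the flag is set. */
static void toy_confirm(struct conn *ct, unsigned long now)
{
        ct->timeout += now;                     /* 1. make timeout absolute */
        /* 2. hash insertion would go here */
        atomic_store_explicit(&ct->confirmed, true,
                              memory_order_release);  /* 3. publish */
}

/* Reader side, modeling nf_ct_should_gc(): the acquire load orders the
 * timeout read after the flag test, as the hunk's barrier comment says. */
static bool toy_should_gc(struct conn *ct, unsigned long now)
{
        if (!atomic_load_explicit(&ct->confirmed, memory_order_acquire))
                return false;   /* unconfirmed: not in the hash yet */
        return (long)(ct->timeout - now) <= 0;  /* expired? */
}

int main(void)
{
        struct conn ct = { 100, false };

        printf("before confirm: %d\n", toy_should_gc(&ct, 500)); /* 0 */
        toy_confirm(&ct, 200);                                   /* timeout = 300 */
        printf("after confirm:  %d\n", toy_should_gc(&ct, 500)); /* 1 */
        return 0;
}

Without this ordering, a garbage-collection path could act on a stale, pre-confirmation timeout of an entry it had just observed as confirmed; that is the uninitialised-entry removal crash cited in the changelog below (RHEL-106432, CVE-2025-38472).
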
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 75de47106342a..bcb283728ad8d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1087,6 +1087,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx) hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &nf_conntrack_hash[repl_idx]); + /* confirmed bit must be set after hlist add, not before: + * loser_ct can still be visible to other CPUs due to + * SLAB_TYPESAFE_BY_RCU. + */ + smp_mb__before_atomic(); + set_bit(IPS_CONFIRMED_BIT, &loser_ct->status); NF_CT_STAT_INC(net, clash_resolve); return NF_ACCEPT; @@ -1224,8 +1230,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) * user context, else we insert an already 'dead' hash, blocking * further use of that particular connection -JM. */ - ct->status |= IPS_CONFIRMED; - if (unlikely(nf_ct_is_dying(ct))) { NF_CT_STAT_INC(net, insert_failed); goto dying; @@ -1257,7 +1261,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) } } - /* Timer relative to confirmation time, not original + /* Timeout is relative to confirmation time, not original setting time, otherwise we'd get timer wrap in weird delay cases. */ ct->timeout += nfct_time_stamp; @@ -1265,11 +1269,21 @@ __nf_conntrack_confirm(struct sk_buff *skb) __nf_conntrack_insert_prepare(ct); /* Since the lookup is lockless, hash insertion must be done after - * starting the timer and setting the CONFIRMED bit. The RCU barriers - * guarantee that no other CPU can find the conntrack before the above - * stores are visible. + * setting ct->timeout. The RCU barriers guarantee that no other CPU + * can find the conntrack before the above stores are visible. */ __nf_conntrack_hash_insert(ct, hash, reply_hash); + + /* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups + * skip entries that lack this bit. This happens when a CPU is looking + * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU + * or when another CPU encounters this entry right after the insertion + * but before the set-confirm-bit below. This bit must not be set until + * after __nf_conntrack_hash_insert(). + */ + smp_mb__before_atomic(); + set_bit(IPS_CONFIRMED_BIT, &ct->status); + nf_conntrack_double_unlock(hash, reply_hash); local_bh_enable(); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index b28001be5de9e..e4c327facb52b 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -945,12 +945,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, pskb_trim(skb, ovs_mac_header_len(key)); } - /* Need to set the pkt_type to involve the routing layer. The - * packet movement through the OVS datapath doesn't generally - * use routing, but this is needed for tunnel cases. - */ - skb->pkt_type = PACKET_OUTGOING; - if (likely(!mru || (skb->len <= mru + vport->dev->hard_header_len))) { ovs_vport_send(vport, skb, ovs_key_mac_proto(key)); diff --git a/net/sctp/input.c b/net/sctp/input.c index 4f1844c66bde6..eab05aedfac1e 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb) * it's better to just linearize it otherwise crc computing * takes longer. */ - if ((!is_gso && skb_linearize(skb)) || + if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) || !pskb_may_pull(skb, sizeof(struct sctphdr))) goto discard_it; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index caae366c42947..cd22968bf8313 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1773,6 +1773,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout) return tls_decrypt_sg(sk, NULL, sgout, &darg); } +/* All records returned from a recvmsg() call must have the same type. + * 0 is not a valid content type. Use it as "no type reported, yet".
+ */ static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm, u8 *control) { @@ -2019,8 +2022,10 @@ int tls_sw_recvmsg(struct sock *sk, if (err < 0) goto end; + /* process_rx_list() will set @control if it processed any records */ copied = err; - if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more) + if (len <= copied || rx_more || + (control && control != TLS_RECORD_TYPE_DATA)) goto end; target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); diff --git a/redhat/kernel.changelog-9.6 b/redhat/kernel.changelog-9.6 index 5855f0455d960..76f1806b2c6a4 100644 --- a/redhat/kernel.changelog-9.6 +++ b/redhat/kernel.changelog-9.6 @@ -1,7 +1,182 @@ -* Mon Sep 08 2025 Patrick Talbert [5.14.0-570.42.2.el9_6] +* Tue Sep 23 2025 CKI KWF Bot [5.14.0-570.49.1.el9_6] +- io_uring/futex: ensure io_futex_wait() cleans up properly on failure (CKI Backport Bot) [RHEL-114335] {CVE-2025-39698} +- selftests: tls: add tests for zero-length records (Sabrina Dubroca) [RHEL-114326] {CVE-2025-39682} +- tls: fix handling of zero-length records on the rx_list (Sabrina Dubroca) [RHEL-114326] {CVE-2025-39682} +Resolves: RHEL-114326, RHEL-114335 + +* Sat Sep 20 2025 CKI KWF Bot [5.14.0-570.48.1.el9_6] +- perf trace: Add missing perf_tool__init() (Michael Petlan) [RHEL-105393] +- ceph: fix client race condition where r_parent becomes stale before sending message (Alex Markuze) [RHEL-114962] +- ceph: fix client race condition validating r_parent before applying state (Alex Markuze) [RHEL-114962] +Resolves: RHEL-105393, RHEL-114962 + +* Thu Sep 18 2025 CKI KWF Bot [5.14.0-570.47.1.el9_6] +- tunnels: reset the GSO metadata before reusing the skb (Antoine Tenart) [RHEL-113916] +- sctp: linearize cloned gso packets in sctp_rcv (CKI Backport Bot) [RHEL-113333] {CVE-2025-38718} +- ice: fix max values for dpll pin phase adjust (Petr Oros) [RHEL-113039] +- ice/ptp: fix crosstimestamp reporting (Petr Oros) [RHEL-112558] +- ice: fix NULL access of tx->in_use in ice_ll_ts_intr (Petr Oros) [RHEL-112873] +- ice: fix NULL access of tx->in_use in ice_ptp_ts_irq (Petr Oros) [RHEL-112873] +- ice: Implement PTP support for E830 devices (Petr Oros) [RHEL-112558] +- ice: Refactor ice_ptp_init_tx_* (Petr Oros) [RHEL-112558] +- ice: Add unified ice_capture_crosststamp (Petr Oros) [RHEL-112558] +- ice: Process TSYN IRQ in a separate function (Petr Oros) [RHEL-112558] +- ice: Use FIELD_PREP for timestamp values (Petr Oros) [RHEL-112558] +- ice: Remove unnecessary ice_is_e8xx() functions (Petr Oros) [RHEL-112558] +- ice: Don't check device type when checking GNSS presence (Petr Oros) [RHEL-112558] +- ice: Add in/out PTP pin delays (Petr Oros) [RHEL-112558] +- ice: fix PHY timestamp extraction for ETH56G (Petr Oros) [RHEL-112558] +- ice: Add correct PHY lane assignment (Petr Oros) [RHEL-112683] +- ice: Fix ETH56G FC-FEC Rx offset value (Petr Oros) [RHEL-112683] +- ice: Fix quad registers read on E825 (Petr Oros) [RHEL-112683] +- ice: Fix E825 initialization (Petr Oros) [RHEL-112683] +- tcp: drop secpath at the same time as we currently drop dst (Sabrina Dubroca) [RHEL-82136] +- smb: client: fix use-after-free in cifs_oplock_break (CKI Backport Bot) [RHEL-111196] {CVE-2025-38527} +- i40e: When removing VF MAC filters, only check PF-set MAC (CKI Backport Bot) [RHEL-109571] +- cpufreq/cppc: Don't compare desired_perf in target() (Mark Langsdorf) [RHEL-109525] +- netfilter: nf_conntrack: fix crash due to removal of uninitialised entry (CKI Backport Bot) [RHEL-106432] {CVE-2025-38472} +- sched/rt: Fix race in 
push_rt_task (Phil Auld) [RHEL-91800] +Resolves: RHEL-106432, RHEL-109525, RHEL-109571, RHEL-111196, RHEL-112558, RHEL-112683, RHEL-112873, RHEL-113039, RHEL-113333, RHEL-113916, RHEL-82136, RHEL-91800 + +* Tue Sep 16 2025 CKI KWF Bot [5.14.0-570.46.1.el9_6] +- net: usb: smsc75xx: Move packet length check to prevent kernel panic in skb_pull (CKI Backport Bot) [RHEL-112246] {CVE-2023-53125} +- net: usb: smsc75xx: Limit packet length to skb->len (CKI Backport Bot) [RHEL-112246] {CVE-2023-53125} +- s390/sclp: Fix SCCB present check (CKI Backport Bot) [RHEL-113558] {CVE-2025-39694} +- use uniform permission checks for all mount propagation changes (Ian Kent) [RHEL-107301] {CVE-2025-38498} +- do_change_type(): refuse to operate on unmounted/not ours mounts (Ian Kent) [RHEL-107301] {CVE-2025-38498} +- usb: dwc3: gadget: check that event count does not exceed event buffer length (CKI Backport Bot) [RHEL-107649] {CVE-2025-37810} +Resolves: RHEL-107301, RHEL-107649, RHEL-112246, RHEL-113558 + +* Sat Sep 13 2025 CKI KWF Bot [5.14.0-570.45.1.el9_6] +- tunnels: Accept PACKET_HOST in skb_tunnel_check_pmtu(). (Adrian Moreno) [RHEL-113279] +- igc: fix lock order in igc_ptp_reset (CKI Backport Bot) [RHEL-108118] +- igc: add lock preventing multiple simultaneous PTM transactions (CKI Backport Bot) [RHEL-108118] +- igc: cleanup PTP module if probe fails (CKI Backport Bot) [RHEL-108118] +- igc: handle the IGC_PTP_ENABLED flag correctly (CKI Backport Bot) [RHEL-108118] +- igc: move ktime snapshot into PTM retry loop (CKI Backport Bot) [RHEL-108118] +- igc: increase wait time before retrying PTM (CKI Backport Bot) [RHEL-108118] +- igc: fix PTM cycle trigger logic (CKI Backport Bot) [RHEL-108118] +- ice: use fixed adapter index for E825C embedded devices (CKI Backport Bot) [RHEL-111766] +- ice: use DSN instead of PCI BDF for ice_adapter index (CKI Backport Bot) [RHEL-111766] +Resolves: RHEL-108118, RHEL-111766, RHEL-113279 + +* Tue Sep 09 2025 Chao YE [5.14.0-570.44.1.el9_6] +- ipv6: mcast: Delay put pmc->idev in mld_del_delrec() (CKI Backport Bot) [RHEL-111149] {CVE-2025-38550} - posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del() (CKI Backport Bot) [RHEL-112780] {CVE-2025-38352} - powerpc/pseries/iommu: create DDW for devices with DMA mask less than 64-bits (CKI Backport Bot) [RHEL-113173] -Resolves: RHEL-112780, RHEL-113173 +Resolves: RHEL-111149, RHEL-112780, RHEL-113173 + +* Sat Sep 06 2025 CKI KWF Bot [5.14.0-570.43.1.el9_6] +- eth: bnxt: fix missing ring index trim on error path (CKI Backport Bot) [RHEL-104561] {CVE-2025-37873} +- book3s64/radix : Align section vmemmap start address to PAGE_SIZE (Mamatha Inamdar) [RHEL-109492] +- book3s64/radix: Fix compile errors when CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=n (Mamatha Inamdar) [RHEL-109492] +- net: introduce per netns packet chains (Paolo Abeni) [RHEL-89050] +- enic: fix incorrect MTU comparison in enic_change_mtu() (John Meneghini) [RHEL-108274] +- net/enic: Allow at least 8 RQs to always be used (John Meneghini) [RHEL-108274] +- enic: get max rq & wq entries supported by hw, 16K queues (John Meneghini) [RHEL-106604] +- enic: cleanup of enic wq request completion path (John Meneghini) [RHEL-106604] +- enic: added enic_wq.c and enic_wq.h (John Meneghini) [RHEL-106604] +- enic: remove unused function cq_enet_wq_desc_dec (John Meneghini) [RHEL-106604] +- enic: enable rq extended cq support (John Meneghini) [RHEL-106604] +- enic: enic rq extended cq defines (John Meneghini) [RHEL-106604] +- enic: enic rq code reorg (John 
Meneghini) [RHEL-106604] +- enic: Move function from header file to c file (John Meneghini) [RHEL-106604] +- enic: add dependency on Page Pool (John Meneghini) [RHEL-106604] +- enic: remove copybreak tunable (John Meneghini) [RHEL-106604] +- enic: Use the Page Pool API for RX (John Meneghini) [RHEL-106604] +- enic: Simplify RX handler function (John Meneghini) [RHEL-106604] +- enic: Move RX functions to their own file (John Meneghini) [RHEL-106604] +- enic: Fix typo in comment in table indexed by link speed (John Meneghini) [RHEL-106604] +- enic: Obtain the Link speed only after the link comes up (John Meneghini) [RHEL-106604] +- enic: Move RX coalescing set function (John Meneghini) [RHEL-106604] +- enic: Move kdump check into enic_adjust_resources() (John Meneghini) [RHEL-106604] +- enic: Move enic resource adjustments to separate function (John Meneghini) [RHEL-106604] +- enic: Adjust used MSI-X wq/rq/cq/interrupt resources in a more robust way (John Meneghini) [RHEL-106604] +- enic: Allocate arrays in enic struct based on VIC config (John Meneghini) [RHEL-106604] +- enic: Save resource counts we read from HW (John Meneghini) [RHEL-106604] +- enic: Make MSI-X I/O interrupts come after the other required ones (John Meneghini) [RHEL-106604] +- enic: Create enic_wq/rq structures to bundle per wq/rq data (John Meneghini) [RHEL-106604] +- enic: Report some per queue statistics in ethtool (John Meneghini) [RHEL-106604] +- enic: Report per queue statistics in netdev qstats (John Meneghini) [RHEL-106604] +- enic: Collect per queue statistics (John Meneghini) [RHEL-106604] +- enic: Use macro instead of static const variables for array sizes (John Meneghini) [RHEL-106604] +- enic: add ethtool get_channel support (John Meneghini) [RHEL-106604] +- enic: Validate length of nl attributes in enic_set_vf_port (John Meneghini) [RHEL-106604] +- enic: Replace hardcoded values for vnic descriptor by defines (John Meneghini) [RHEL-106604] +- enic: Avoid false positive under FORTIFY_SOURCE (John Meneghini) [RHEL-106604] +- scsi: fnic: Fix missing DMA mapping error in fnic_send_frame() (John Meneghini) [RHEL-106420] +- scsi: fnic: Set appropriate logging level for log message (John Meneghini) [RHEL-106420] +- scsi: fnic: Add and improve logs in FDMI and FDMI ABTS paths (John Meneghini) [RHEL-106420] +- scsi: fnic: Turn off FDMI ACTIVE flags on link down (John Meneghini) [RHEL-106420] +- scsi: fnic: Fix crash in fnic_wq_cmpl_handler when FDMI times out (John Meneghini) [RHEL-106420] +- scsi: fnic: Remove unnecessary NUL-terminations (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove redundant flush_workqueue() calls (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove unnecessary spinlock locking and unlocking (John Meneghini) [RHEL-106419] +- scsi: fnic: Replace fnic->lock_flags with local flags (John Meneghini) [RHEL-106419] +- scsi: fnic: Replace use of sizeof with standard usage (John Meneghini) [RHEL-106419] +- scsi: fnic: Fix indentation and remove unnecessary parenthesis (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove unnecessary debug print (John Meneghini) [RHEL-106419] +- scsi: fnic: Propagate SCSI error code from fnic_scsi_drv_init() (John Meneghini) [RHEL-106419] +- scsi: fnic: Test for memory allocation failure and return error code (John Meneghini) [RHEL-106419] +- scsi: fnic: Return appropriate error code from failure of scsi drv init (John Meneghini) [RHEL-106419] +- scsi: fnic: Return appropriate error code for mem alloc failure (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove 
always-true IS_FNIC_FCP_INITIATOR macro (John Meneghini) [RHEL-106419] +- scsi: fnic: Fix use of uninitialized value in debug message (John Meneghini) [RHEL-106419] +- scsi: fnic: Delete incorrect debugfs error handling (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove unnecessary else to fix warning in FDLS FIP (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove extern definition from .c files (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove unnecessary else and unnecessary break in FDLS (John Meneghini) [RHEL-106419] +- scsi: fnic: Increment driver version (John Meneghini) [RHEL-106419] +- scsi: fnic: Add support to handle port channel RSCN (John Meneghini) [RHEL-106419] +- scsi: fnic: Code cleanup (John Meneghini) [RHEL-106419] +- scsi: fnic: Add stats and related functionality (John Meneghini) [RHEL-106419] +- scsi: fnic: Modify fnic interfaces to use FDLS (John Meneghini) [RHEL-106419] +- scsi: fnic: Modify IO path to use FDLS (John Meneghini) [RHEL-106419] +- scsi: fnic: Add functionality in fnic to support FDLS (John Meneghini) [RHEL-106419] +- scsi: fnic: Add and integrate support for FIP (John Meneghini) [RHEL-106419] +- scsi: fnic: Add and integrate support for FDMI (John Meneghini) [RHEL-106419] +- scsi: fnic: Add Cisco hardware model names (John Meneghini) [RHEL-106419] +- scsi: fnic: Add support for unsolicited requests and responses (John Meneghini) [RHEL-106419] +- scsi: fnic: Add support for target based solicited requests and responses (John Meneghini) [RHEL-106419] +- scsi: fnic: Add support for fabric based solicited requests and responses (John Meneghini) [RHEL-106419] +- scsi: fnic: Add headers and definitions for FDLS (John Meneghini) [RHEL-106419] +- scsi: fnic: Replace shost_printk() with dev_info()/dev_err() (John Meneghini) [RHEL-106419] +- scsi: fnic: Use vcalloc() instead of vmalloc() and memset(0) (John Meneghini) [RHEL-106419] +- scsi: fnic: Move flush_work initialization out of if block (John Meneghini) [RHEL-106419] +- scsi: fnic: Move fnic_fnic_flush_tx() to a work queue (John Meneghini) [RHEL-106419] +- scsi: fnic: Convert snprintf() to sysfs_emit() (John Meneghini) [RHEL-106419] +- scsi: fnic: Clean up some inconsistent indenting (John Meneghini) [RHEL-106419] +- scsi: fnic: unlock on error path in fnic_queuecommand() (John Meneghini) [RHEL-106419] +- scsi: fnic: Increment driver version (John Meneghini) [RHEL-106419] +- scsi: fnic: Improve logs and add support for multiqueue (MQ) (John Meneghini) [RHEL-106419] +- scsi: fnic: Add support for multiqueue (MQ) in fnic driver (John Meneghini) [RHEL-106419] +- scsi: fnic: Add support for multiqueue (MQ) in fnic_main.c (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove usage of host_lock (John Meneghini) [RHEL-106419] +- scsi: fnic: Define stats to track multiqueue (MQ) IOs (John Meneghini) [RHEL-106419] +- scsi: fnic: Modify ISRs to support multiqueue (MQ) (John Meneghini) [RHEL-106419] +- scsi: fnic: Refactor and redefine fnic.h for multiqueue (John Meneghini) [RHEL-106419] +- scsi: fnic: Get copy workqueue count and interrupt mode from config (John Meneghini) [RHEL-106419] +- scsi: fnic: Rename wq_copy to hw_copy_wq (John Meneghini) [RHEL-106419] +- scsi: fnic: Add and improve log messages (John Meneghini) [RHEL-106419] +- scsi: fnic: Add and use fnic number (John Meneghini) [RHEL-106419] +- scsi: fnic: Modify definitions to sync with VIC firmware (John Meneghini) [RHEL-106419] +- scsi: fnic: Return error if vmalloc() failed (John Meneghini) [RHEL-106419] +- scsi: fnic: Clean up some inconsistent 
indenting (John Meneghini) [RHEL-106419] +- scsi: fnic: Fix sg_reset success path (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove unused functions fnic_scsi_host_start/end_tag() (John Meneghini) [RHEL-106419] +- scsi: fnic: Replace sgreset tag with max_tag_id (John Meneghini) [RHEL-106419] +- scsi: fnic: Replace return codes in fnic_clean_pending_aborts() (John Meneghini) [RHEL-106419] +- scsi: fnic: Use vmalloc_array() and vcalloc() (John Meneghini) [RHEL-106419] +- scsi: fnic: Use vzalloc() (John Meneghini) [RHEL-106419] +- scsi: fnic: Refactor code in fnic probe to initialize SCSI layer (John Meneghini) [RHEL-106419] +- scsi: fnic: Replace DMA mask of 64 bits with 47 bits (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove unneeded flush_workqueue() (John Meneghini) [RHEL-106419] +- scsi: fnic: Remove redundant NULL check (John Meneghini) [RHEL-106419] +- scsi: fnic: Stop using the SCSI pointer (John Meneghini) [RHEL-106419] +- scsi: fnic: Fix a tracing statement (John Meneghini) [RHEL-106419] +- scsi: fnic: Call scsi_done() directly (John Meneghini) [RHEL-106419] +- Revert "driver core: Fix uevent_show() vs driver detach race" (Mark Langsdorf) [RHEL-85410] +Resolves: RHEL-104561, RHEL-106419, RHEL-106420, RHEL-106604, RHEL-108274, RHEL-109492, RHEL-85410, RHEL-89050 * Sat Aug 30 2025 CKI KWF Bot [5.14.0-570.42.1.el9_6] - powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory (Mamatha Inamdar) [RHEL-103015] diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 40dacef449611..7326bdff0ae8d 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -4525,6 +4525,7 @@ static int trace__replay(struct trace *trace) struct evsel *evsel; int err = -1; + perf_tool__init(&trace->tool, /*ordered_events=*/true); trace->tool.sample = trace__process_sample; trace->tool.mmap = perf_event__process_mmap; trace->tool.mmap2 = perf_event__process_mmap2; diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 6f573be09e018..45f6238005fdb 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -155,13 +155,12 @@ static int tls_send_cmsg(int fd, unsigned char record_type, return sendmsg(fd, &msg, flags); } -static int tls_recv_cmsg(struct __test_metadata *_metadata, - int fd, unsigned char record_type, - void *data, size_t len, int flags) +static int __tls_recv_cmsg(struct __test_metadata *_metadata, + int fd, unsigned char *ctype, + void *data, size_t len, int flags) { char cbuf[CMSG_SPACE(sizeof(char))]; struct cmsghdr *cmsg; - unsigned char ctype; struct msghdr msg; struct iovec vec; int n; @@ -180,7 +179,20 @@ static int tls_recv_cmsg(struct __test_metadata *_metadata, EXPECT_NE(cmsg, NULL); EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - ctype = *((unsigned char *)CMSG_DATA(cmsg)); + if (ctype) + *ctype = *((unsigned char *)CMSG_DATA(cmsg)); + + return n; +} + +static int tls_recv_cmsg(struct __test_metadata *_metadata, + int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + unsigned char ctype; + int n; + + n = __tls_recv_cmsg(_metadata, fd, &ctype, data, len, flags); EXPECT_EQ(ctype, record_type); return n; @@ -1599,6 +1611,283 @@ TEST_F(tls, recv_efault) EXPECT_EQ(memcmp(rec2, recv_mem + 9, ret - 9), 0); } +struct raw_rec { + unsigned int plain_len; + unsigned char plain_data[100]; + unsigned int cipher_len; + unsigned char cipher_data[128]; +}; + +/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: 'Hello world' */ 
+static const struct raw_rec id0_data_l11 = { + .plain_len = 11, + .plain_data = { + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, + 0x72, 0x6c, 0x64, + }, + .cipher_len = 40, + .cipher_data = { + 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xa2, 0x33, + 0xde, 0x8d, 0x94, 0xf0, 0x29, 0x6c, 0xb1, 0xaf, + 0x6a, 0x75, 0xb2, 0x93, 0xad, 0x45, 0xd5, 0xfd, + 0x03, 0x51, 0x57, 0x8f, 0xf9, 0xcc, 0x3b, 0x42, + }, +}; + +/* TLS 1.2, AES_CCM, ctrl, seqno:0, plaintext: '' */ +static const struct raw_rec id0_ctrl_l0 = { + .plain_len = 0, + .plain_data = { + }, + .cipher_len = 29, + .cipher_data = { + 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x38, 0x7b, + 0xa6, 0x1c, 0xdd, 0xa7, 0x19, 0x33, 0xab, 0xae, + 0x88, 0xe1, 0xd2, 0x08, 0x4f, + }, +}; + +/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: '' */ +static const struct raw_rec id0_data_l0 = { + .plain_len = 0, + .plain_data = { + }, + .cipher_len = 29, + .cipher_data = { + 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x37, 0x90, + 0x70, 0x45, 0x89, 0xfb, 0x5c, 0xc7, 0x89, 0x03, + 0x68, 0x80, 0xd3, 0xd8, 0xcc, + }, +}; + +/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: 'Hello world' */ +static const struct raw_rec id1_data_l11 = { + .plain_len = 11, + .plain_data = { + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, + 0x72, 0x6c, 0x64, + }, + .cipher_len = 40, + .cipher_data = { + 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x1a, 0x9c, + 0xd0, 0xa8, 0x9a, 0xd6, 0x69, 0xd6, 0x1a, 0xe3, + 0xb5, 0x1f, 0x0d, 0x2c, 0xe2, 0x97, 0x46, 0xff, + 0x2b, 0xcc, 0x5a, 0xc4, 0xa3, 0xb9, 0xef, 0xba, + }, +}; + +/* TLS 1.2, AES_CCM, ctrl, seqno:1, plaintext: '' */ +static const struct raw_rec id1_ctrl_l0 = { + .plain_len = 0, + .plain_data = { + }, + .cipher_len = 29, + .cipher_data = { + 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0xf0, 0xfe, + 0xee, 0xd9, 0xe2, 0x5d, 0xc7, 0x11, 0x4c, 0xe6, + 0xb4, 0x7e, 0xef, 0x40, 0x2b, + }, +}; + +/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: '' */ +static const struct raw_rec id1_data_l0 = { + .plain_len = 0, + .plain_data = { + }, + .cipher_len = 29, + .cipher_data = { + 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0xfc, 0x86, + 0xc8, 0xf0, 0x55, 0xf9, 0x47, 0x3f, 0x74, 0xdc, + 0xc9, 0xbf, 0xfe, 0x5b, 0xb1, + }, +}; + +/* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: 'Hello world' */ +static const struct raw_rec id2_ctrl_l11 = { + .plain_len = 11, + .plain_data = { + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, + 0x72, 0x6c, 0x64, + }, + .cipher_len = 40, + .cipher_data = { + 0x16, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19, + 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87, + 0x2a, 0x04, 0x11, 0x3d, 0xf8, 0x64, 0x5f, 0x36, + 0x8b, 0xa8, 0xee, 0x4c, 0x6d, 0x62, 0xa5, 0x00, + }, +}; + +/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: 'Hello world' */ +static const struct raw_rec id2_data_l11 = { + .plain_len = 11, + .plain_data = { + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, + 0x72, 0x6c, 0x64, + }, + .cipher_len = 40, + .cipher_data = { + 0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19, + 0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87, + 0x8e, 0xa1, 0xd0, 0xcd, 0x33, 0xb5, 0x86, 0x2b, + 0x17, 0xf1, 0x52, 0x2a, 0x55, 0x62, 0x65, 0x11, + }, +}; + +/* TLS 1.2, AES_CCM, ctrl, seqno:2, 
plaintext: '' */ +static const struct raw_rec id2_ctrl_l0 = { + .plain_len = 0, + .plain_data = { + }, + .cipher_len = 29, + .cipher_data = { + 0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x5c, 0x0e, + 0x41, 0xdd, 0xba, 0xd3, 0xcc, 0xcf, 0x6d, 0xd9, + 0x06, 0xdb, 0x79, 0xe5, 0x5d, + }, +}; + +/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: '' */ +static const struct raw_rec id2_data_l0 = { + .plain_len = 0, + .plain_data = { + }, + .cipher_len = 29, + .cipher_data = { + 0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0xc3, 0xca, 0x26, + 0x22, 0xe4, 0x25, 0xfb, 0x5f, 0x6d, 0xbf, 0x83, + 0x30, 0x48, 0x69, 0x1a, 0x47, + }, +}; + +FIXTURE(zero_len) +{ + int fd, cfd; + bool notls; +}; + +FIXTURE_VARIANT(zero_len) +{ + const struct raw_rec *recs[4]; + ssize_t recv_ret[4]; +}; + +FIXTURE_VARIANT_ADD(zero_len, data_data_data) +{ + .recs = { &id0_data_l11, &id1_data_l11, &id2_data_l11, }, + .recv_ret = { 33, -EAGAIN, }, +}; + +FIXTURE_VARIANT_ADD(zero_len, data_0ctrl_data) +{ + .recs = { &id0_data_l11, &id1_ctrl_l0, &id2_data_l11, }, + .recv_ret = { 11, 0, 11, -EAGAIN, }, +}; + +FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0data) +{ + .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l0, }, + .recv_ret = { -EAGAIN, }, +}; + +FIXTURE_VARIANT_ADD(zero_len, 0data_0data_ctrl) +{ + .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l11, }, + .recv_ret = { 0, 11, -EAGAIN, }, +}; + +FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0ctrl) +{ + .recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l0, }, + .recv_ret = { 0, 0, -EAGAIN, }, +}; + +FIXTURE_VARIANT_ADD(zero_len, 0ctrl_0ctrl_0ctrl) +{ + .recs = { &id0_ctrl_l0, &id1_ctrl_l0, &id2_ctrl_l0, }, + .recv_ret = { 0, 0, 0, -EAGAIN, }, +}; + +FIXTURE_VARIANT_ADD(zero_len, 0data_0data_data) +{ + .recs = { &id0_data_l0, &id1_data_l0, &id2_data_l11, }, + .recv_ret = { 11, -EAGAIN, }, +}; + +FIXTURE_VARIANT_ADD(zero_len, data_0data_0data) +{ + .recs = { &id0_data_l11, &id1_data_l0, &id2_data_l0, }, + .recv_ret = { 11, -EAGAIN, }, +}; + +FIXTURE_SETUP(zero_len) +{ + struct tls_crypto_info_keys tls12; + int ret; + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128, &tls12); + + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); + if (self->notls) + return; + + /* Don't install keys on fd, we'll send raw records */ + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); +} + +FIXTURE_TEARDOWN(zero_len) +{ + close(self->fd); + close(self->cfd); +} + +TEST_F(zero_len, test) +{ + const struct raw_rec *const *rec; + unsigned char buf[128]; + int rec_off; + int i; + + for (i = 0; i < 4 && variant->recs[i]; i++) + EXPECT_EQ(send(self->fd, variant->recs[i]->cipher_data, + variant->recs[i]->cipher_len, 0), + variant->recs[i]->cipher_len); + + rec = &variant->recs[0]; + rec_off = 0; + for (i = 0; i < 4; i++) { + int j, ret; + + ret = variant->recv_ret[i] >= 0 ? variant->recv_ret[i] : -1; + EXPECT_EQ(__tls_recv_cmsg(_metadata, self->cfd, NULL, + buf, sizeof(buf), MSG_DONTWAIT), ret); + if (ret == -1) + EXPECT_EQ(errno, -variant->recv_ret[i]); + if (variant->recv_ret[i] == -EAGAIN) + break; + + for (j = 0; j < ret; j++) { + while (rec_off == (*rec)->plain_len) { + rec++; + rec_off = 0; + } + EXPECT_EQ(buf[j], (*rec)->plain_data[rec_off]); + rec_off++; + } + } +}; + FIXTURE(tls_err) { int fd, cfd;