
Commit 895427b

jsmart-gh authored and martinkpetersen committed
scsi: lpfc: NVME Initiator: Base modifications
This patch adds base modifications for NVME initiator support. The base
modifications consist of:

- Formal split of SLI3 rings from SLI-4 WQs (sometimes referred to as
  rings as well) as implementation now widely varies between the two.
- Addition of configuration modes: SCSI initiator only; NVME initiator
  only; NVME target only; and SCSI and NVME initiator. The configuration
  mode drives overall adapter configuration, offloads enabled, and
  resource splits. NVME support is only available on SLI-4 devices and
  newer fw.
- Implements the following based on configuration mode:
  - Exchange resources are split by protocol; obviously, if only 1 mode
    is enabled, then no split occurs. Default is 50/50. A module
    attribute allows tuning.
  - Pools and config parameters are separated per-protocol.
  - Each protocol has its own set of queues, but they share interrupt
    vectors.
    SCSI:
      SLI3 devices have few queues and the original style of queue
      allocation remains. SLI4 devices piggy-back on an "io-channel"
      concept that eventually needs to merge with scsi-mq/blk-mq
      support (it is underway). For now, the paradigm continues as it
      existed prior: an io channel allocates N msix and N WQs (N=4
      default) and either round robins or uses cpu # modulo N for
      scheduling. A bunch of module parameters allow the configuration
      to be tuned.
    NVME (initiator):
      Allocates an msix per cpu (or whatever pci_alloc_irq_vectors
      gets), allocates a WQ per cpu, and maps the WQs to msix on a
      WQ # modulo msix vector count basis (see the sketch after this
      message). Module parameters exist to cap/control the config if
      desired.
  - Each protocol has its own buffer and dma pools.

I apologize for the size of the patch.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
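[Editor's note] The "WQ # modulo msix vector count" mapping described for
the NVME side reduces to one line of arithmetic. Below is a minimal sketch,
assuming the io_channel_irqs field this patch adds to struct lpfc_hba holds
the vector count actually granted by pci_alloc_irq_vectors(); the helper
name is hypothetical and is not a function added by this patch:

	/* Illustrative only: bind an NVME WQ to an MSI-X vector by
	 * WQ index modulo the number of granted vectors, as the
	 * commit message describes.
	 */
	static inline uint32_t
	lpfc_nvme_wq_to_vector(struct lpfc_hba *phba, uint32_t wqidx)
	{
		return wqidx % phba->io_channel_irqs;
	}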
1 parent: 1d9d5a9 · commit: 895427b

22 files changed: +3353 −1634 lines

drivers/scsi/lpfc/lpfc.h

Lines changed: 71 additions & 9 deletions
@@ -20,6 +20,7 @@
  *******************************************************************/

 #include <scsi/scsi_host.h>
+#include <linux/ktime.h>

 #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
 #define CONFIG_SCSI_LPFC_DEBUG_FS
@@ -53,6 +54,7 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
 #define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
 #define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
+#define LPFC_MIN_NVME_SEG_CNT	254

 #define LPFC_MAX_SGE_SIZE	0x80000000 /* Maximum data allowed in a SGE */
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
@@ -114,6 +116,13 @@ enum lpfc_polling_flags {
 	DISABLE_FCP_RING_INT = 0x2
 };

+struct perf_prof {
+	uint16_t cmd_cpu[40];
+	uint16_t rsp_cpu[40];
+	uint16_t qh_cpu[40];
+	uint16_t wqidx[40];
+};
+
 /* Provide DMA memory definitions the driver uses per port instance. */
 struct lpfc_dmabuf {
 	struct list_head list;
@@ -131,10 +140,24 @@ struct lpfc_dma_pool {
 struct hbq_dmabuf {
 	struct lpfc_dmabuf hbuf;
 	struct lpfc_dmabuf dbuf;
-	uint32_t size;
+	uint16_t total_size;
+	uint16_t bytes_recv;
 	uint32_t tag;
 	struct lpfc_cq_event cq_event;
 	unsigned long time_stamp;
+	void *context;
+};
+
+struct rqb_dmabuf {
+	struct lpfc_dmabuf hbuf;
+	struct lpfc_dmabuf dbuf;
+	uint16_t total_size;
+	uint16_t bytes_recv;
+	void *context;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_sglq *sglq;
+	struct lpfc_queue *hrq;	/* ptr to associated Header RQ */
+	struct lpfc_queue *drq;	/* ptr to associated Data RQ */
 };

 /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -442,6 +465,11 @@ struct lpfc_vport {
 	uint16_t fdmi_num_disc;
 	uint32_t fdmi_hba_mask;
 	uint32_t fdmi_port_mask;
+
+	/* There is a single nvme instance per vport. */
+	struct nvme_fc_local_port *localport;
+	uint8_t  nvmei_support;	/* driver supports NVME Initiator */
+	uint32_t last_fcp_wqidx;
 };

 struct hbq_s {
@@ -459,10 +487,9 @@ struct hbq_s {
 					       struct hbq_dmabuf *);
 };

-#define LPFC_MAX_HBQS	4
 /* this matches the position in the lpfc_hbq_defs array */
 #define LPFC_ELS_HBQ	0
-#define LPFC_EXTRA_HBQ	1
+#define LPFC_MAX_HBQS	1

 enum hba_temp_state {
 	HBA_NORMAL_TEMP,
@@ -652,6 +679,8 @@ struct lpfc_hba {
 					 * Firmware supports Forced Link Speed
 					 * capability
 					 */
+#define HBA_NVME_IOQ_FLUSH	0x80000	/* NVME IO queues flushed. */
+
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;

@@ -700,6 +729,8 @@ struct lpfc_hba {
 	uint8_t  wwpn[8];
 	uint32_t RandomData[7];
 	uint8_t  fcp_embed_io;
+	uint8_t  nvme_support;	/* Firmware supports NVME */
+	uint8_t  nvmet_support;	/* driver supports NVMET */
 	uint8_t  mds_diags_support;

 	/* HBA Config Parameters */
@@ -725,6 +756,9 @@ struct lpfc_hba {
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
 	uint32_t cfg_fcp_io_channel;
+	uint32_t cfg_nvme_oas;
+	uint32_t cfg_nvme_io_channel;
+	uint32_t cfg_nvme_enable_fb;
 	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -770,6 +804,12 @@ struct lpfc_hba {
 #define LPFC_FDMI_SUPPORT	1	/* FDMI supported? */
 	uint32_t cfg_enable_SmartSAN;
 	uint32_t cfg_enable_mds_diags;
+	uint32_t cfg_enable_fc4_type;
+	uint32_t cfg_xri_split;
+#define LPFC_ENABLE_FCP  1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+	uint32_t io_channel_irqs;	/* number of irqs for io channels */
 	lpfc_vpd_t vpd;		/* vital product data */

 	struct pci_dev *pcidev;
@@ -784,11 +824,11 @@ struct lpfc_hba {
 	unsigned long data_flags;

 	uint32_t hbq_in_use;		/* HBQs in use flag */
-	struct list_head rb_pend_list;	/* Received buffers to be processed */
 	uint32_t hbq_count;		/* Count of configured HBQs */
 	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */

-	atomic_t fcp_qidx;		/* next work queue to post work to */
+	atomic_t fcp_qidx;		/* next FCP WQ (RR Policy) */
+	atomic_t nvme_qidx;		/* next NVME WQ (RR Policy) */

 	phys_addr_t pci_bar0_map;	/* Physical address for PCI BAR0 */
 	phys_addr_t pci_bar1_map;	/* Physical address for PCI BAR1 */
@@ -843,9 +883,17 @@ struct lpfc_hba {
 	/*
 	 * stat counters
 	 */
-	uint64_t fc4InputRequests;
-	uint64_t fc4OutputRequests;
-	uint64_t fc4ControlRequests;
+	uint64_t fc4ScsiInputRequests;
+	uint64_t fc4ScsiOutputRequests;
+	uint64_t fc4ScsiControlRequests;
+	uint64_t fc4ScsiIoCmpls;
+	uint64_t fc4NvmeInputRequests;
+	uint64_t fc4NvmeOutputRequests;
+	uint64_t fc4NvmeControlRequests;
+	uint64_t fc4NvmeIoCmpls;
+	uint64_t fc4NvmeLsRequests;
+	uint64_t fc4NvmeLsCmpls;

 	uint64_t bg_guard_err_cnt;
 	uint64_t bg_apptag_err_cnt;
 	uint64_t bg_reftag_err_cnt;
@@ -856,17 +904,23 @@ struct lpfc_hba {
 	struct list_head lpfc_scsi_buf_list_get;
 	struct list_head lpfc_scsi_buf_list_put;
 	uint32_t total_scsi_bufs;
+	spinlock_t nvme_buf_list_get_lock;	/* NVME buf alloc list lock */
+	spinlock_t nvme_buf_list_put_lock;	/* NVME buf free list lock */
+	struct list_head lpfc_nvme_buf_list_get;
+	struct list_head lpfc_nvme_buf_list_put;
+	uint32_t total_nvme_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
 	struct list_head active_rrq_list;
 	spinlock_t hbalock;

 	/* pci_mem_pools */
-	struct pci_pool *lpfc_scsi_dma_buf_pool;
+	struct pci_pool *lpfc_sg_dma_buf_pool;
 	struct pci_pool *lpfc_mbuf_pool;
 	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
 	struct pci_pool *lpfc_drb_pool;	/* data receive buffer pool */
 	struct pci_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
+	struct pci_pool *txrdy_payload_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;

 	mempool_t *mbox_mem_pool;
@@ -1092,3 +1146,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)

 	return 0;
 }
+
+static inline struct lpfc_sli_ring *
+lpfc_phba_elsring(struct lpfc_hba *phba)
+{
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return phba->sli4_hba.els_wq->pring;
+	return &phba->sli.sli3_ring[LPFC_ELS_RING];
+}
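[Editor's note] The new lpfc_phba_elsring() inline above lets callers fetch
the ELS ring without checking the SLI revision at each call site. A
hypothetical caller (not part of this diff) would simply do:

	/* Resolves to the WQ-backed pring on SLI-4, the ring array on SLI-3. */
	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);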
