Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 84e92c1

Browse files
committed Jun 1, 2018
Merge branch 'nvme-4.18' of git://git.infradead.org/nvme into for-4.18/block
Pull NVMe changes from Christoph: "Below is another set of NVMe updates for 4.18. Besides the usual bug fixes this includes more feature completeness in terms of AEN and log page handling on the target." * 'nvme-4.18' of git://git.infradead.org/nvme: nvme: use the changed namespaces list log to clear ns data changed AENs nvme: mark nvme_queue_scan static nvme: submit AEN event configuration on startup nvmet: mask pending AENs nvmet: add AEN configuration support nvmet: implement the changed namespaces log nvmet: split log page implementation nvmet: add a new nvmet_zero_sgl helper nvme.h: add AEN configuration symbols nvme.h: add the changed namespace list log nvme.h: untangle AEN notice definitions nvmet: fix error return code in nvmet_file_ns_enable() nvmet: fix a typo in nvmet_file_ns_enable() nvme-fabrics: allow internal passthrough command on deleting controllers nvme-loop: add support for multiple ports nvme-pci: simplify __nvme_submit_cmd nvme-pci: Rate limit the nvme timeout warnings nvme: allow duplicate controller if prior controller being deleted
2 parents 131d08e + 30d9096 commit 84e92c1

File tree

11 files changed

+351
-193
lines changed

11 files changed

+351
-193
lines changed
 

‎drivers/nvme/host/core.c

Lines changed: 90 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,15 @@ static struct class *nvme_subsys_class;
100100
static void nvme_ns_remove(struct nvme_ns *ns);
101101
static int nvme_revalidate_disk(struct gendisk *disk);
102102

103+
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
104+
{
105+
/*
106+
* Only new queue scan work when admin and IO queues are both alive
107+
*/
108+
if (ctrl->state == NVME_CTRL_LIVE)
109+
queue_work(nvme_wq, &ctrl->scan_work);
110+
}
111+
103112
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
104113
{
105114
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -1027,6 +1036,21 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
10271036
}
10281037
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
10291038

1039+
#define NVME_AEN_SUPPORTED \
1040+
(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)
1041+
1042+
static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1043+
{
1044+
u32 result;
1045+
int status;
1046+
1047+
status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
1048+
ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
1049+
if (status)
1050+
dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1051+
ctrl->oaes & NVME_AEN_SUPPORTED);
1052+
}
1053+
10301054
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
10311055
{
10321056
struct nvme_user_io io;
@@ -2344,6 +2368,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
23442368

23452369
ctrl->oacs = le16_to_cpu(id->oacs);
23462370
ctrl->oncs = le16_to_cpup(&id->oncs);
2371+
ctrl->oaes = le32_to_cpu(id->oaes);
23472372
atomic_set(&ctrl->abort_limit, id->acl + 1);
23482373
ctrl->vwc = id->vwc;
23492374
ctrl->cntlid = le16_to_cpup(&id->cntlid);
@@ -3166,6 +3191,42 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
31663191
nvme_remove_invalid_namespaces(ctrl, nn);
31673192
}
31683193

3194+
static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
3195+
{
3196+
size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
3197+
__le32 *log;
3198+
int error, i;
3199+
bool ret = false;
3200+
3201+
log = kzalloc(log_size, GFP_KERNEL);
3202+
if (!log)
3203+
return false;
3204+
3205+
error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
3206+
if (error) {
3207+
dev_warn(ctrl->device,
3208+
"reading changed ns log failed: %d\n", error);
3209+
goto out_free_log;
3210+
}
3211+
3212+
if (log[0] == cpu_to_le32(0xffffffff))
3213+
goto out_free_log;
3214+
3215+
for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
3216+
u32 nsid = le32_to_cpu(log[i]);
3217+
3218+
if (nsid == 0)
3219+
break;
3220+
dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
3221+
nvme_validate_ns(ctrl, nsid);
3222+
}
3223+
ret = true;
3224+
3225+
out_free_log:
3226+
kfree(log);
3227+
return ret;
3228+
}
3229+
31693230
static void nvme_scan_work(struct work_struct *work)
31703231
{
31713232
struct nvme_ctrl *ctrl =
@@ -3178,33 +3239,30 @@ static void nvme_scan_work(struct work_struct *work)
31783239

31793240
WARN_ON_ONCE(!ctrl->tagset);
31803241

3242+
if (test_and_clear_bit(EVENT_NS_CHANGED, &ctrl->events)) {
3243+
if (nvme_scan_changed_ns_log(ctrl))
3244+
goto out_sort_namespaces;
3245+
dev_info(ctrl->device, "rescanning namespaces.\n");
3246+
}
3247+
31813248
if (nvme_identify_ctrl(ctrl, &id))
31823249
return;
31833250

31843251
nn = le32_to_cpu(id->nn);
31853252
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
31863253
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
31873254
if (!nvme_scan_ns_list(ctrl, nn))
3188-
goto done;
3255+
goto out_free_id;
31893256
}
31903257
nvme_scan_ns_sequential(ctrl, nn);
3191-
done:
3258+
out_free_id:
3259+
kfree(id);
3260+
out_sort_namespaces:
31923261
down_write(&ctrl->namespaces_rwsem);
31933262
list_sort(NULL, &ctrl->namespaces, ns_cmp);
31943263
up_write(&ctrl->namespaces_rwsem);
3195-
kfree(id);
31963264
}
31973265

3198-
void nvme_queue_scan(struct nvme_ctrl *ctrl)
3199-
{
3200-
/*
3201-
* Only new queue scan work when admin and IO queues are both alive
3202-
*/
3203-
if (ctrl->state == NVME_CTRL_LIVE)
3204-
queue_work(nvme_wq, &ctrl->scan_work);
3205-
}
3206-
EXPORT_SYMBOL_GPL(nvme_queue_scan);
3207-
32083266
/*
32093267
* This function iterates the namespace list unlocked to allow recovery from
32103268
* controller failure. It is up to the caller to ensure the namespace list is
@@ -3318,6 +3376,21 @@ static void nvme_fw_act_work(struct work_struct *work)
33183376
nvme_get_fw_slot_info(ctrl);
33193377
}
33203378

3379+
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
3380+
{
3381+
switch ((result & 0xff00) >> 8) {
3382+
case NVME_AER_NOTICE_NS_CHANGED:
3383+
set_bit(EVENT_NS_CHANGED, &ctrl->events);
3384+
nvme_queue_scan(ctrl);
3385+
break;
3386+
case NVME_AER_NOTICE_FW_ACT_STARTING:
3387+
queue_work(nvme_wq, &ctrl->fw_act_work);
3388+
break;
3389+
default:
3390+
dev_warn(ctrl->device, "async event result %08x\n", result);
3391+
}
3392+
}
3393+
33213394
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
33223395
volatile union nvme_result *res)
33233396
{
@@ -3327,6 +3400,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
33273400
return;
33283401

33293402
switch (result & 0x7) {
3403+
case NVME_AER_NOTICE:
3404+
nvme_handle_aen_notice(ctrl, result);
3405+
break;
33303406
case NVME_AER_ERROR:
33313407
case NVME_AER_SMART:
33323408
case NVME_AER_CSS:
@@ -3336,18 +3412,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
33363412
default:
33373413
break;
33383414
}
3339-
3340-
switch (result & 0xff07) {
3341-
case NVME_AER_NOTICE_NS_CHANGED:
3342-
dev_info(ctrl->device, "rescanning\n");
3343-
nvme_queue_scan(ctrl);
3344-
break;
3345-
case NVME_AER_NOTICE_FW_ACT_STARTING:
3346-
queue_work(nvme_wq, &ctrl->fw_act_work);
3347-
break;
3348-
default:
3349-
dev_warn(ctrl->device, "async event result %08x\n", result);
3350-
}
33513415
queue_work(nvme_wq, &ctrl->async_event_work);
33523416
}
33533417
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
@@ -3370,6 +3434,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
33703434

33713435
if (ctrl->queue_count > 1) {
33723436
nvme_queue_scan(ctrl);
3437+
nvme_enable_aen(ctrl);
33733438
queue_work(nvme_wq, &ctrl->async_event_work);
33743439
nvme_start_queues(ctrl);
33753440
}

‎drivers/nvme/host/fabrics.c

Lines changed: 31 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -545,71 +545,54 @@ blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
545545
return BLK_STS_OK;
546546

547547
switch (ctrl->state) {
548-
case NVME_CTRL_DELETING:
549-
goto reject_io;
550-
551548
case NVME_CTRL_NEW:
552549
case NVME_CTRL_CONNECTING:
550+
case NVME_CTRL_DELETING:
551+
/*
552+
* This is the case of starting a new or deleting an association
553+
* but connectivity was lost before it was fully created or torn
554+
* down. We need to error the commands used to initialize the
555+
* controller so the reconnect can go into a retry attempt. The
556+
* commands should all be marked REQ_FAILFAST_DRIVER, which will
557+
* hit the reject path below. Anything else will be queued while
558+
* the state settles.
559+
*/
553560
if (!is_connected)
554-
/*
555-
* This is the case of starting a new
556-
* association but connectivity was lost
557-
* before it was fully created. We need to
558-
* error the commands used to initialize the
559-
* controller so the reconnect can go into a
560-
* retry attempt. The commands should all be
561-
* marked REQ_FAILFAST_DRIVER, which will hit
562-
* the reject path below. Anything else will
563-
* be queued while the state settles.
564-
*/
565-
goto reject_or_queue_io;
566-
567-
if ((queue_live &&
568-
!(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
569-
(!queue_live && blk_rq_is_passthrough(rq) &&
570-
cmd->common.opcode == nvme_fabrics_command &&
571-
cmd->fabrics.fctype == nvme_fabrics_type_connect))
572-
/*
573-
* If queue is live, allow only commands that
574-
* are internally generated pass through. These
575-
* are commands on the admin queue to initialize
576-
* the controller. This will reject any ioctl
577-
* admin cmds received while initializing.
578-
*
579-
* If the queue is not live, allow only a
580-
* connect command. This will reject any ioctl
581-
* admin cmd as well as initialization commands
582-
* if the controller reverted the queue to non-live.
583-
*/
561+
break;
562+
563+
/*
564+
* If queue is live, allow only commands that are internally
565+
* generated pass through. These are commands on the admin
566+
* queue to initialize the controller. This will reject any
567+
* ioctl admin cmds received while initializing.
568+
*/
569+
if (queue_live && !(nvme_req(rq)->flags & NVME_REQ_USERCMD))
584570
return BLK_STS_OK;
585571

586572
/*
587-
* fall-thru to the reject_or_queue_io clause
573+
* If the queue is not live, allow only a connect command. This
574+
* will reject any ioctl admin cmd as well as initialization
575+
* commands if the controller reverted the queue to non-live.
588576
*/
577+
if (!queue_live && blk_rq_is_passthrough(rq) &&
578+
cmd->common.opcode == nvme_fabrics_command &&
579+
cmd->fabrics.fctype == nvme_fabrics_type_connect)
580+
return BLK_STS_OK;
589581
break;
590-
591-
/* these cases fall-thru
592-
* case NVME_CTRL_LIVE:
593-
* case NVME_CTRL_RESETTING:
594-
*/
595582
default:
596583
break;
597584
}
598585

599-
reject_or_queue_io:
600586
/*
601-
* Any other new io is something we're not in a state to send
602-
* to the device. Default action is to busy it and retry it
603-
* after the controller state is recovered. However, anything
604-
* marked for failfast or nvme multipath is immediately failed.
605-
* Note: commands used to initialize the controller will be
606-
* marked for failfast.
587+
* Any other new io is something we're not in a state to send to the
588+
* device. Default action is to busy it and retry it after the
589+
* controller state is recovered. However, anything marked for failfast
590+
* or nvme multipath is immediately failed. Note: commands used to
591+
* initialize the controller will be marked for failfast.
607592
* Note: nvme cli/ioctl commands are marked for failfast.
608593
*/
609594
if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
610595
return BLK_STS_RESOURCE;
611-
612-
reject_io:
613596
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
614597
return BLK_STS_IOERR;
615598
}

‎drivers/nvme/host/fabrics.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,9 @@ static inline bool
139139
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
140140
struct nvmf_ctrl_options *opts)
141141
{
142-
if (strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
142+
if (ctrl->state == NVME_CTRL_DELETING ||
143+
ctrl->state == NVME_CTRL_DEAD ||
144+
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
143145
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
144146
memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
145147
return false;

‎drivers/nvme/host/nvme.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,7 @@ struct nvme_ctrl {
176176
u16 kas;
177177
u8 npss;
178178
u8 apsta;
179+
u32 oaes;
179180
u32 aen_result;
180181
unsigned int shutdown_timeout;
181182
unsigned int kato;
@@ -188,6 +189,8 @@ struct nvme_ctrl {
188189
struct delayed_work ka_work;
189190
struct nvme_command ka_cmd;
190191
struct work_struct fw_act_work;
192+
#define EVENT_NS_CHANGED (1 << 0)
193+
unsigned long events;
191194

192195
/* Power saving configuration */
193196
u64 ps_max_latency_us;
@@ -394,7 +397,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
394397
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
395398
int nvme_init_identify(struct nvme_ctrl *ctrl);
396399

397-
void nvme_queue_scan(struct nvme_ctrl *ctrl);
398400
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
399401

400402
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
There was a problem loading the remainder of the diff.

0 commit comments

Comments (0)
Failed to load comments.