@@ -100,6 +100,15 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 
+static void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+	/*
+	 * Only queue new scan work when admin and IO queues are both alive
+	 */
+	if (ctrl->state == NVME_CTRL_LIVE)
+		queue_work(nvme_wq, &ctrl->scan_work);
+}
+
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 {
 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
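The scan helper moves to the top of the file and becomes static here; its old definition and EXPORT_SYMBOL_GPL are dropped further down in this diff, so it has to be visible before its first in-file user. For reference, the pre-existing sysfs rescan hook in core.c is one such caller; it looks roughly like the following sketch (reconstructed for illustration, not part of this patch):

	static ssize_t nvme_sysfs_rescan(struct device *dev,
			struct device_attribute *attr, const char *buf,
			size_t count)
	{
		struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

		nvme_queue_scan(ctrl);	/* no-op unless the controller is LIVE */
		return count;
	}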
@@ -1027,6 +1036,21 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
 }
 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
+#define NVME_AEN_SUPPORTED \
+	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)
+
+static void nvme_enable_aen(struct nvme_ctrl *ctrl)
+{
+	u32 result;
+	int status;
+
+	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
+			ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+	if (status)
+		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
+			 ctrl->oaes & NVME_AEN_SUPPORTED);
+}
+
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_user_io io;
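NVME_AEN_SUPPORTED masks the controller-advertised OAES field down to the notices this driver can actually service, so nvme_set_features() never requests an event nobody handles. A sketch of the bit layout this assumes (the real constants live in include/linux/nvme.h as part of this series; the positions follow the NVMe Asynchronous Event Configuration feature, FID 0x0b):

	enum {
		NVME_AEN_CFG_NS_ATTR	= 1 << 8,	/* Namespace Attribute Notices */
		NVME_AEN_CFG_FW_ACT	= 1 << 9,	/* Firmware Activation Notices */
	};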
@@ -2344,6 +2368,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
 	ctrl->oacs = le16_to_cpu(id->oacs);
 	ctrl->oncs = le16_to_cpup(&id->oncs);
+	ctrl->oaes = le32_to_cpu(id->oaes);
 	atomic_set(&ctrl->abort_limit, id->acl + 1);
 	ctrl->vwc = id->vwc;
 	ctrl->cntlid = le16_to_cpup(&id->cntlid);
@@ -3166,6 +3191,42 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
 	nvme_remove_invalid_namespaces(ctrl, nn);
 }
 
+static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
+{
+	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
+	__le32 *log;
+	int error, i;
+	bool ret = false;
+
+	log = kzalloc(log_size, GFP_KERNEL);
+	if (!log)
+		return false;
+
+	error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
+	if (error) {
+		dev_warn(ctrl->device,
+			"reading changed ns log failed: %d\n", error);
+		goto out_free_log;
+	}
+
+	if (log[0] == cpu_to_le32(0xffffffff))
+		goto out_free_log;
+
+	for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
+		u32 nsid = le32_to_cpu(log[i]);
+
+		if (nsid == 0)
+			break;
+		dev_info(ctrl->device, "rescanning namespace %u.\n", nsid);
+		nvme_validate_ns(ctrl, nsid);
+	}
+	ret = true;
+
+out_free_log:
+	kfree(log);
+	return ret;
+}
+
 static void nvme_scan_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl =
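nvme_scan_changed_ns_log() reads the Changed Namespace List log page and revalidates only the namespaces it names, instead of walking every possible NSID. A sketch of the layout it relies on (log page identifier and size per the NVMe spec; the constant is assumed to be defined alongside this series):

	/*
	 * Changed Namespace List log (NVME_LOG_CHANGED_NS, log page 0x04):
	 * an array of up to 1024 little-endian NSIDs, zero-terminated when
	 * fewer namespaces changed.  A first entry of 0xffffffff means more
	 * than 1024 namespaces changed, so the log is unusable and the
	 * function returns false to force a full rescan.
	 */
	#define NVME_MAX_CHANGED_NAMESPACES	1024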
@@ -3178,33 +3239,30 @@ static void nvme_scan_work(struct work_struct *work)
 
 	WARN_ON_ONCE(!ctrl->tagset);
 
+	if (test_and_clear_bit(EVENT_NS_CHANGED, &ctrl->events)) {
+		if (nvme_scan_changed_ns_log(ctrl))
+			goto out_sort_namespaces;
+		dev_info(ctrl->device, "rescanning namespaces.\n");
+	}
+
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
 	nn = le32_to_cpu(id->nn);
 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
 		if (!nvme_scan_ns_list(ctrl, nn))
-			goto done;
+			goto out_free_id;
 	}
 	nvme_scan_ns_sequential(ctrl, nn);
- done:
+ out_free_id:
+	kfree(id);
+ out_sort_namespaces:
 	down_write(&ctrl->namespaces_rwsem);
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
 	up_write(&ctrl->namespaces_rwsem);
-	kfree(id);
 }
 
-void nvme_queue_scan(struct nvme_ctrl *ctrl)
-{
-	/*
-	 * Only new queue scan work when admin and IO queues are both alive
-	 */
-	if (ctrl->state == NVME_CTRL_LIVE)
-		queue_work(nvme_wq, &ctrl->scan_work);
-}
-EXPORT_SYMBOL_GPL(nvme_queue_scan);
-
 /*
  * This function iterates the namespace list unlocked to allow recovery from
  * controller failure. It is up to the caller to ensure the namespace list is
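The test_and_clear_bit() at the top of nvme_scan_work() is the consumer half of a handshake whose producer half, the set_bit() in the AEN notice handler, appears later in this diff. A condensed sketch of the pairing (EVENT_NS_CHANGED is assumed to be a bit index in ctrl->events introduced by this series):

	/* producer: AEN completion path */
	set_bit(EVENT_NS_CHANGED, &ctrl->events);
	nvme_queue_scan(ctrl);

	/* consumer: scan worker; the atomic test-and-clear collapses any
	 * number of notices posted before the worker runs into one log read */
	if (test_and_clear_bit(EVENT_NS_CHANGED, &ctrl->events))
		nvme_scan_changed_ns_log(ctrl);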
@@ -3318,6 +3376,21 @@ static void nvme_fw_act_work(struct work_struct *work)
 	nvme_get_fw_slot_info(ctrl);
 }
 
+static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+{
+	switch ((result & 0xff00) >> 8) {
+	case NVME_AER_NOTICE_NS_CHANGED:
+		set_bit(EVENT_NS_CHANGED, &ctrl->events);
+		nvme_queue_scan(ctrl);
+		break;
+	case NVME_AER_NOTICE_FW_ACT_STARTING:
+		queue_work(nvme_wq, &ctrl->fw_act_work);
+		break;
+	default:
+		dev_warn(ctrl->device, "async event result %08x\n", result);
+	}
+}
+
 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		volatile union nvme_result *res)
 {
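A worked decode of the completion result dword as the new code slices it: bits 2:0 carry the asynchronous event type, and for Notice events bits 15:8 carry the specific notice (the 0x0/0x1 values below are what NVME_AER_NOTICE_NS_CHANGED and NVME_AER_NOTICE_FW_ACT_STARTING are assumed to resolve to once this series renumbers them as plain notice codes):

	u32 result = 0x00000102;		/* example AEN completion result */
	u32 type   = result & 0x7;		/* 0x2 == NVME_AER_NOTICE */
	u32 notice = (result & 0xff00) >> 8;	/* 0x1 == FW activation starting;
						 * 0x0 would be NS changed */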
@@ -3327,6 +3400,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		return;
 
 	switch (result & 0x7) {
+	case NVME_AER_NOTICE:
+		nvme_handle_aen_notice(ctrl, result);
+		break;
 	case NVME_AER_ERROR:
 	case NVME_AER_SMART:
 	case NVME_AER_CSS:
@@ -3336,18 +3412,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	default:
 		break;
 	}
-
-	switch (result & 0xff07) {
-	case NVME_AER_NOTICE_NS_CHANGED:
-		dev_info(ctrl->device, "rescanning\n");
-		nvme_queue_scan(ctrl);
-		break;
-	case NVME_AER_NOTICE_FW_ACT_STARTING:
-		queue_work(nvme_wq, &ctrl->fw_act_work);
-		break;
-	default:
-		dev_warn(ctrl->device, "async event result %08x\n", result);
-	}
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
@@ -3370,6 +3434,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 
 	if (ctrl->queue_count > 1) {
 		nvme_queue_scan(ctrl);
+		nvme_enable_aen(ctrl);
 		queue_work(nvme_wq, &ctrl->async_event_work);
 		nvme_start_queues(ctrl);
 	}