 #define MIN_LOG2_INTERLEAVE_SECTORS	3
 #define MAX_LOG2_INTERLEAVE_SECTORS	31
 #define METADATA_WORKQUEUE_MAX_ACTIVE	16
+#define RECALC_SECTORS			8192
+#define RECALC_WRITE_SUPER		16
 
 /*
  * Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -58,9 +60,12 @@ struct superblock {
 	__u64 provided_data_sectors;	/* userspace uses this value */
 	__u32 flags;
 	__u8 log2_sectors_per_block;
+	__u8 pad[3];
+	__u64 recalc_sector;
 };
 
 #define SB_FLAG_HAVE_JOURNAL_MAC	0x1
+#define SB_FLAG_RECALCULATING		0x2
 
 #define JOURNAL_ENTRY_ROUNDUP		8
 
@@ -214,6 +219,11 @@ struct dm_integrity_c {
 	struct workqueue_struct *writer_wq;
 	struct work_struct writer_work;
 
+	struct workqueue_struct *recalc_wq;
+	struct work_struct recalc_work;
+	u8 *recalc_buffer;
+	u8 *recalc_tags;
+
 	struct bio_list flush_bio_list;
 
 	unsigned long autocommit_jiffies;
@@ -417,7 +427,7 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
 
 static void sb_set_version(struct dm_integrity_c *ic)
 {
-	if (ic->meta_dev)
+	if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
 		ic->sb->version = SB_VERSION_2;
 	else
 		ic->sb->version = SB_VERSION_1;
@@ -1777,9 +1787,14 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
 
 	if (need_sync_io) {
 		wait_for_completion_io(&read_comp);
+		if (unlikely(ic->recalc_wq != NULL) &&
+		    ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
+			goto skip_check;
 		if (likely(!bio->bi_status))
 			integrity_metadata(&dio->work);
 		else
+skip_check:
 			dec_in_flight(dio);
 
 	} else {
@@ -2079,6 +2094,108 @@ static void integrity_writer(struct work_struct *w)
 	spin_unlock_irq(&ic->endio_wait.lock);
 }
 
+static void recalc_write_super(struct dm_integrity_c *ic)
+{
+	int r;
+
+	dm_integrity_flush_buffers(ic);
+	if (dm_integrity_failed(ic))
+		return;
+
+	sb_set_version(ic);
+	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
+	if (unlikely(r))
+		dm_integrity_io_error(ic, "writing superblock", r);
+}
+
+static void integrity_recalc(struct work_struct *w)
+{
+	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
+	struct dm_integrity_range range;
+	struct dm_io_request io_req;
+	struct dm_io_region io_loc;
+	sector_t area, offset;
+	sector_t metadata_block;
+	unsigned metadata_offset;
+	__u8 *t;
+	unsigned i;
+	int r;
+	unsigned super_counter = 0;
+
+	spin_lock_irq(&ic->endio_wait.lock);
+
+next_chunk:
+
+	if (unlikely(READ_ONCE(ic->suspending)))
+		goto unlock_ret;
+
+	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
+	if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+		goto unlock_ret;
+
+	get_area_and_offset(ic, range.logical_sector, &area, &offset);
+	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
+	if (!ic->meta_dev)
+		range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
+
+	if (unlikely(!add_new_range(ic, &range, true)))
+		wait_and_add_new_range(ic, &range);
+
+	spin_unlock_irq(&ic->endio_wait.lock);
+
+	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
+		recalc_write_super(ic);
+		super_counter = 0;
+	}
+
+	if (unlikely(dm_integrity_failed(ic)))
+		goto err;
+
+	io_req.bi_op = REQ_OP_READ;
+	io_req.bi_op_flags = 0;
+	io_req.mem.type = DM_IO_VMA;
+	io_req.mem.ptr.addr = ic->recalc_buffer;
+	io_req.notify.fn = NULL;
+	io_req.client = ic->io;
+	io_loc.bdev = ic->dev->bdev;
+	io_loc.sector = get_data_sector(ic, area, offset);
+	io_loc.count = range.n_sectors;
+
+	r = dm_io(&io_req, 1, &io_loc, NULL);
+	if (unlikely(r)) {
+		dm_integrity_io_error(ic, "reading data", r);
+		goto err;
+	}
+
+	t = ic->recalc_tags;
+	for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+		integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+		t += ic->tag_size;
+	}
+
+	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
+
+	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
+	if (unlikely(r)) {
+		dm_integrity_io_error(ic, "writing tags", r);
+		goto err;
+	}
+
+	spin_lock_irq(&ic->endio_wait.lock);
+	remove_range_unlocked(ic, &range);
+	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
+	goto next_chunk;
+
+err:
+	remove_range(ic, &range);
+	return;
+
+unlock_ret:
+	spin_unlock_irq(&ic->endio_wait.lock);
+
+	recalc_write_super(ic);
+}
+
 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
 			 unsigned n_sections, unsigned char commit_seq)
 {
@@ -2283,6 +2400,9 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
 
 	WRITE_ONCE(ic->suspending, 1);
 
+	if (ic->recalc_wq)
+		drain_workqueue(ic->recalc_wq);
+
 	queue_work(ic->commit_wq, &ic->commit_work);
 	drain_workqueue(ic->commit_wq);
 
@@ -2305,6 +2425,16 @@ static void dm_integrity_resume(struct dm_target *ti)
 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
 
 	replay_journal(ic);
+
+	if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
+		if (recalc_pos < ic->provided_data_sectors) {
+			queue_work(ic->recalc_wq, &ic->recalc_work);
+		} else if (recalc_pos > ic->provided_data_sectors) {
+			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
+			recalc_write_super(ic);
+		}
+	}
 }
 
 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
@@ -2319,6 +2449,10 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
 		DMEMIT("%llu %llu",
 			(unsigned long long)atomic64_read(&ic->number_of_mismatches),
 			(unsigned long long)ic->provided_data_sectors);
+		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+			DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
+		else
+			DMEMIT(" -");
 		break;
 
 	case STATUSTYPE_TABLE: {
@@ -2328,20 +2462,23 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
 		arg_count = 5;
 		arg_count += !!ic->meta_dev;
 		arg_count += ic->sectors_per_block != 1;
+		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
 		arg_count += !!ic->internal_hash_alg.alg_string;
 		arg_count += !!ic->journal_crypt_alg.alg_string;
 		arg_count += !!ic->journal_mac_alg.alg_string;
 		DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
 		       ic->tag_size, ic->mode, arg_count);
 		if (ic->meta_dev)
 			DMEMIT(" meta_device:%s", ic->meta_dev->name);
+		if (ic->sectors_per_block != 1)
+			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
+		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+			DMEMIT(" recalculate");
 		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
 		DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
 		DMEMIT(" commit_time:%u", ic->autocommit_msec);
-		if (ic->sectors_per_block != 1)
-			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
 
 #define EMIT_ALG(a, n)		\
 	do {			\
@@ -2947,6 +3084,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		{0, 9, "Invalid number of feature args"},
 	};
 	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+	bool recalculate;
 	bool should_write_sb;
 	__u64 threshold;
 	unsigned long long start;
@@ -3008,6 +3146,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	buffer_sectors = DEFAULT_BUFFER_SECTORS;
 	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
 	sync_msec = DEFAULT_SYNC_MSEC;
+	recalculate = false;
 	ic->sectors_per_block = 1;
 
 	as.argc = argc - DIRECT_ARGUMENTS;
@@ -3069,6 +3208,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 						    "Invalid journal_mac argument");
 			if (r)
 				goto bad;
+		} else if (!strcmp(opt_string, "recalculate")) {
+			recalculate = true;
 		} else {
 			r = -EINVAL;
 			ti->error = "Invalid argument";
@@ -3297,6 +3438,38 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		(unsigned long long)ic->provided_data_sectors);
 	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
 
+	if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
+		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+		ic->sb->recalc_sector = cpu_to_le64(0);
+	}
+
+	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+		if (!ic->internal_hash) {
+			r = -EINVAL;
+			ti->error = "Recalculate is only valid with internal hash";
+			goto bad;
+		}
+		ic->recalc_wq = alloc_workqueue("dm-intergrity-recalc", WQ_MEM_RECLAIM, 1);
+		if (!ic->recalc_wq) {
+			ti->error = "Cannot allocate workqueue";
+			r = -ENOMEM;
+			goto bad;
+		}
+		INIT_WORK(&ic->recalc_work, integrity_recalc);
+		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+		if (!ic->recalc_buffer) {
+			ti->error = "Cannot allocate buffer for recalculating";
+			r = -ENOMEM;
+			goto bad;
+		}
+		ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL);
+		if (!ic->recalc_tags) {
+			ti->error = "Cannot allocate tags for recalculating";
+			r = -ENOMEM;
+			goto bad;
+		}
+	}
+
 	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
 			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
 	if (IS_ERR(ic->bufio)) {
@@ -3363,6 +3536,12 @@ static void dm_integrity_dtr(struct dm_target *ti)
 		destroy_workqueue(ic->commit_wq);
 	if (ic->writer_wq)
 		destroy_workqueue(ic->writer_wq);
+	if (ic->recalc_wq)
+		destroy_workqueue(ic->recalc_wq);
+	if (ic->recalc_buffer)
+		vfree(ic->recalc_buffer);
+	if (ic->recalc_tags)
+		kvfree(ic->recalc_tags);
 	if (ic->bufio)
 		dm_bufio_client_destroy(ic->bufio);
 	mempool_exit(&ic->journal_io_mempool);
@@ -3412,7 +3591,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 
 static struct target_type integrity_target = {
 	.name			= "integrity",
-	.version		= {1, 1, 0},
+	.version		= {1, 2, 0},
 	.module			= THIS_MODULE,
 	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
 	.ctr			= dm_integrity_ctr,