@@ -44,11 +44,12 @@ static void pblk_line_mark_bb(struct work_struct *work)
 }
 
 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
-                         struct ppa_addr *ppa)
+                         struct ppa_addr ppa_addr)
 {
         struct nvm_tgt_dev *dev = pblk->dev;
         struct nvm_geo *geo = &dev->geo;
-        int pos = pblk_ppa_to_pos(geo, *ppa);
+        struct ppa_addr *ppa;
+        int pos = pblk_ppa_to_pos(geo, ppa_addr);
 
         pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
         atomic_long_inc(&pblk->erase_failed);
@@ -58,26 +59,38 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                 pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                         line->id, pos);
 
+        /* Not necessary to mark bad blocks on 2.0 spec. */
+        if (geo->version == NVM_OCSSD_SPEC_20)
+                return;
+
+        ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
+        if (!ppa)
+                return;
+
+        *ppa = ppa_addr;
         pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                                                 GFP_ATOMIC, pblk->bb_wq);
 }
 
 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
 {
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        struct nvm_chk_meta *chunk;
         struct pblk_line *line;
+        int pos;
 
         line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
+        pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
+        chunk = &line->chks[pos];
+
         atomic_dec(&line->left_seblks);
 
         if (rqd->error) {
-                struct ppa_addr *ppa;
-
-                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
-                if (!ppa)
-                        return;
-
-                *ppa = rqd->ppa_addr;
-                pblk_mark_bb(pblk, line, ppa);
+                chunk->state = NVM_CHK_ST_OFFLINE;
+                pblk_mark_bb(pblk, line, rqd->ppa_addr);
+        } else {
+                chunk->state = NVM_CHK_ST_FREE;
         }
 
         atomic_dec(&pblk->inflight_io);
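
Note on the hunk above: the erase completion path now hands the failed address to pblk_mark_bb() by value, so the heap copy is made only once it is clear the work must be deferred (the 2.0-spec path returns before allocating). A minimal userspace sketch of that hand-off pattern follows; the types and the defer_work() stand-in for pblk_gen_run_ws() are simplifications, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct ppa_addr { unsigned long long ppa; };

/* stand-in for pblk_gen_run_ws(); the kernel queues fn on a workqueue */
static void defer_work(void *ctx, void (*fn)(void *))
{
        fn(ctx); /* runs inline here for the sake of the sketch */
}

static void mark_bb_work(void *ctx)
{
        struct ppa_addr *ppa = ctx;

        printf("marking bad block at ppa 0x%llx\n", ppa->ppa);
        free(ppa); /* the work function owns and frees the copy */
}

static void mark_bb(struct ppa_addr ppa_addr)
{
        struct ppa_addr *ppa = malloc(sizeof(*ppa));

        if (!ppa)
                return; /* allocation failed: drop the request */

        *ppa = ppa_addr; /* copy the by-value argument to the heap */
        defer_work(ppa, mark_bb_work);
}

int main(void)
{
        struct ppa_addr addr = { .ppa = 0xdeadbeefULL };

        mark_bb(addr);
        return 0;
}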
@@ -92,6 +105,49 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
         mempool_free(rqd, pblk->e_rq_pool);
 }
 
+/*
+ * Get information for all chunks from the device.
+ *
+ * The caller is responsible for freeing the returned structure
+ */
+struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
+{
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        struct nvm_chk_meta *meta;
+        struct ppa_addr ppa;
+        unsigned long len;
+        int ret;
+
+        ppa.ppa = 0;
+
+        len = geo->all_chunks * sizeof(*meta);
+        meta = kzalloc(len, GFP_KERNEL);
+        if (!meta)
+                return ERR_PTR(-ENOMEM);
+
+        ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
+        if (ret) {
+                kfree(meta);
+                return ERR_PTR(-EIO);
+        }
+
+        return meta;
+}
+
+struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
+                                        struct nvm_chk_meta *meta,
+                                        struct ppa_addr ppa)
+{
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
+        int lun_off = ppa.m.pu * geo->num_chk;
+        int chk_off = ppa.m.chk;
+
+        return meta + ch_off + lun_off + chk_off;
+}
+
 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                            u64 paddr)
 {
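
The two helpers above work as a pair: pblk_chunk_get_info() fetches the device's whole chunk-metadata table, and pblk_chunk_get_off() locates one entry in it with row-major arithmetic ordered group -> parallel unit -> chunk. A standalone sketch of that indexing follows; the geometry constants are made-up stand-ins for the real nvm_geo fields:

#include <stdio.h>

#define NUM_GRP 2 /* groups/channels (hypothetical) */
#define NUM_LUN 4 /* parallel units per group (hypothetical) */
#define NUM_CHK 8 /* chunks per parallel unit (hypothetical) */

struct chk_meta { int state; };

static struct chk_meta *chunk_off(struct chk_meta *meta, int grp, int pu,
                                  int chk)
{
        int ch_off = grp * NUM_CHK * NUM_LUN; /* skip whole groups */
        int lun_off = pu * NUM_CHK;           /* skip whole parallel units */

        return meta + ch_off + lun_off + chk;
}

int main(void)
{
        static struct chk_meta meta[NUM_GRP * NUM_LUN * NUM_CHK];
        struct chk_meta *m = chunk_off(meta, 1, 2, 5);

        printf("flat index: %td\n", m - meta); /* 1*32 + 2*8 + 5 = 53 */
        return 0;
}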
@@ -1091,10 +1147,34 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
         return 1;
 }
 
+static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
+{
+        struct pblk_line_meta *lm = &pblk->lm;
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        int blk_to_erase = atomic_read(&line->blk_in_line);
+        int i;
+
+        for (i = 0; i < lm->blk_per_line; i++) {
+                struct pblk_lun *rlun = &pblk->luns[i];
+                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
+                int state = line->chks[pos].state;
+
+                /* Free chunks should not be erased */
+                if (state & NVM_CHK_ST_FREE) {
+                        set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
+                                                        line->erase_bitmap);
+                        blk_to_erase--;
+                }
+        }
+
+        return blk_to_erase;
+}
+
 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
 {
         struct pblk_line_meta *lm = &pblk->lm;
-        int blk_in_line = atomic_read(&line->blk_in_line);
+        int blk_to_erase;
 
         line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
         if (!line->map_bitmap)
@@ -1107,7 +1187,21 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
                 return -ENOMEM;
         }
 
+        /* Bad blocks do not need to be erased */
+        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
+
         spin_lock(&line->lock);
+
+        /* If we have not written to this line, we need to mark up free chunks
+         * as already erased
+         */
+        if (line->state == PBLK_LINESTATE_NEW) {
+                blk_to_erase = pblk_prepare_new_line(pblk, line);
+                line->state = PBLK_LINESTATE_FREE;
+        } else {
+                blk_to_erase = atomic_read(&line->blk_in_line);
+        }
+
         if (line->state != PBLK_LINESTATE_FREE) {
                 kfree(line->map_bitmap);
                 kfree(line->invalid_bitmap);
@@ -1119,15 +1213,12 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
 
         line->state = PBLK_LINESTATE_OPEN;
 
-        atomic_set(&line->left_eblks, blk_in_line);
-        atomic_set(&line->left_seblks, blk_in_line);
+        atomic_set(&line->left_eblks, blk_to_erase);
+        atomic_set(&line->left_seblks, blk_to_erase);
 
         line->meta_distance = lm->meta_distance;
         spin_unlock(&line->lock);
 
-        /* Bad blocks do not need to be erased */
-        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
-
         kref_init(&line->ref);
 
         return 0;
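
The pblk_line_prepare() changes hinge on pblk_prepare_new_line() above: for a line that has never been written, chunks the device already reports as free need no erase, so they are pre-marked in the erase bitmap and excluded from the left_eblks/left_seblks counts. A standalone sketch of that accounting follows; the flag value is assumed and a single 64-bit word stands in for line->erase_bitmap:

#include <stdint.h>

#define CHK_ST_FREE (1u << 0) /* assumed value of NVM_CHK_ST_FREE */

struct chk { unsigned int state; };

/* returns how many of nr_blks still need an erase, marking the rest */
static int blocks_to_erase(const struct chk *chks, int nr_blks,
                           uint64_t *erase_bitmap)
{
        int to_erase = nr_blks;
        int pos;

        for (pos = 0; pos < nr_blks; pos++) {
                if (chks[pos].state & CHK_ST_FREE) {
                        *erase_bitmap |= 1ULL << pos; /* already erased */
                        to_erase--;
                }
        }

        return to_erase;
}

int main(void)
{
        struct chk chks[4] = { {CHK_ST_FREE}, {0}, {CHK_ST_FREE}, {0} };
        uint64_t bitmap = 0;

        /* two of the four chunks are free, so two erases remain */
        return blocks_to_erase(chks, 4, &bitmap) == 2 ? 0 : 1;
}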
@@ -1583,12 +1674,14 @@ static void pblk_line_should_sync_meta(struct pblk *pblk)
 
 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
 {
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        struct pblk_line_meta *lm = &pblk->lm;
         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
         struct list_head *move_list;
+        int i;
 
 #ifdef CONFIG_NVM_DEBUG
-        struct pblk_line_meta *lm = &pblk->lm;
-
         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
                                 "pblk: corrupt closed line %d\n", line->id);
 #endif
@@ -1610,6 +1703,15 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
         line->smeta = NULL;
         line->emeta = NULL;
 
+        for (i = 0; i < lm->blk_per_line; i++) {
+                struct pblk_lun *rlun = &pblk->luns[i];
+                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
+                int state = line->chks[pos].state;
+
+                if (!(state & NVM_CHK_ST_OFFLINE))
+                        line->chks[pos].state = NVM_CHK_ST_CLOSED;
+        }
+
         spin_unlock(&line->lock);
         spin_unlock(&l_mg->gc_lock);
 }
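
At close time the loop above settles each chunk of the line into its final state: a chunk taken offline by an earlier erase failure keeps that state, and every other chunk becomes closed. The transition condensed into one function; the flag values mirror the kernel's NVM_CHK_ST_* constants but are assumptions here:

#define CHK_ST_CLOSED  (1u << 1) /* assumed flag values */
#define CHK_ST_OFFLINE (1u << 3)

static unsigned int close_chunk_state(unsigned int state)
{
        /* offline chunks stay offline; all others close with the line */
        return (state & CHK_ST_OFFLINE) ? state : CHK_ST_CLOSED;
}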