Commit 32ef941

Javier González authored and axboe committed
lightnvm: pblk: implement get log report chunk
In preparation for pblk supporting the 2.0 spec, implement get log report
chunk support in pblk. Also, define the chunk states as given in the 2.0
spec.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent bb845ae commit 32ef941
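
The chunk states used by this change (NVM_CHK_ST_FREE, NVM_CHK_ST_CLOSED, NVM_CHK_ST_OPEN, NVM_CHK_ST_OFFLINE) are defined in the lightnvm header touched by this commit but not shown in the diff below. As orientation only, a minimal sketch of what 2.0-style chunk-state bit flags look like; treat the exact values as an assumption, the authoritative definitions live in include/linux/lightnvm.h:

/* Sketch only: chunk states per the OCSSD 2.0 report-chunk log page.
 * The real definitions are in include/linux/lightnvm.h (part of this
 * commit, not shown in this file's diff).
 */
enum {
        NVM_CHK_ST_FREE         = 1 << 0,
        NVM_CHK_ST_CLOSED       = 1 << 1,
        NVM_CHK_ST_OPEN         = 1 << 2,
        NVM_CHK_ST_OFFLINE      = 1 << 3,
};

Because the states are bit flags rather than an enumeration of exclusive values, the code below tests them with bitwise AND (e.g. state & NVM_CHK_ST_FREE) instead of equality.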

File tree

4 files changed: +298, -82 lines changed


drivers/lightnvm/pblk-core.c

Lines changed: 120 additions & 18 deletions
@@ -44,11 +44,12 @@ static void pblk_line_mark_bb(struct work_struct *work)
 }
 
 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
-                         struct ppa_addr *ppa)
+                         struct ppa_addr ppa_addr)
 {
         struct nvm_tgt_dev *dev = pblk->dev;
         struct nvm_geo *geo = &dev->geo;
-        int pos = pblk_ppa_to_pos(geo, *ppa);
+        struct ppa_addr *ppa;
+        int pos = pblk_ppa_to_pos(geo, ppa_addr);
 
         pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
         atomic_long_inc(&pblk->erase_failed);
@@ -58,26 +59,38 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                 pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
                                                         line->id, pos);
 
+        /* Not necessary to mark bad blocks on 2.0 spec. */
+        if (geo->version == NVM_OCSSD_SPEC_20)
+                return;
+
+        ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
+        if (!ppa)
+                return;
+
+        *ppa = ppa_addr;
         pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                                                 GFP_ATOMIC, pblk->bb_wq);
 }
 
 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
 {
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        struct nvm_chk_meta *chunk;
         struct pblk_line *line;
+        int pos;
 
         line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
+        pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
+        chunk = &line->chks[pos];
+
         atomic_dec(&line->left_seblks);
 
         if (rqd->error) {
-                struct ppa_addr *ppa;
-
-                ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
-                if (!ppa)
-                        return;
-
-                *ppa = rqd->ppa_addr;
-                pblk_mark_bb(pblk, line, ppa);
+                chunk->state = NVM_CHK_ST_OFFLINE;
+                pblk_mark_bb(pblk, line, rqd->ppa_addr);
+        } else {
+                chunk->state = NVM_CHK_ST_FREE;
         }
 
         atomic_dec(&pblk->inflight_io);
@@ -92,6 +105,49 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
         mempool_free(rqd, pblk->e_rq_pool);
 }
 
+/*
+ * Get information for all chunks from the device.
+ *
+ * The caller is responsible for freeing the returned structure
+ */
+struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
+{
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        struct nvm_chk_meta *meta;
+        struct ppa_addr ppa;
+        unsigned long len;
+        int ret;
+
+        ppa.ppa = 0;
+
+        len = geo->all_chunks * sizeof(*meta);
+        meta = kzalloc(len, GFP_KERNEL);
+        if (!meta)
+                return ERR_PTR(-ENOMEM);
+
+        ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
+        if (ret) {
+                kfree(meta);
+                return ERR_PTR(-EIO);
+        }
+
+        return meta;
+}
+
+struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
+                                        struct nvm_chk_meta *meta,
+                                        struct ppa_addr ppa)
+{
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
+        int lun_off = ppa.m.pu * geo->num_chk;
+        int chk_off = ppa.m.chk;
+
+        return meta + ch_off + lun_off + chk_off;
+}
+
 void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                            u64 paddr)
 {
@@ -1091,10 +1147,34 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
         return 1;
 }
 
+static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
+{
+        struct pblk_line_meta *lm = &pblk->lm;
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        int blk_to_erase = atomic_read(&line->blk_in_line);
+        int i;
+
+        for (i = 0; i < lm->blk_per_line; i++) {
+                struct pblk_lun *rlun = &pblk->luns[i];
+                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
+                int state = line->chks[pos].state;
+
+                /* Free chunks should not be erased */
+                if (state & NVM_CHK_ST_FREE) {
+                        set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
+                                                        line->erase_bitmap);
+                        blk_to_erase--;
+                }
+        }
+
+        return blk_to_erase;
+}
+
 static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
 {
         struct pblk_line_meta *lm = &pblk->lm;
-        int blk_in_line = atomic_read(&line->blk_in_line);
+        int blk_to_erase;
 
         line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
         if (!line->map_bitmap)
@@ -1107,7 +1187,21 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
                 return -ENOMEM;
         }
 
+        /* Bad blocks do not need to be erased */
+        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
+
         spin_lock(&line->lock);
+
+        /* If we have not written to this line, we need to mark up free chunks
+         * as already erased
+         */
+        if (line->state == PBLK_LINESTATE_NEW) {
+                blk_to_erase = pblk_prepare_new_line(pblk, line);
+                line->state = PBLK_LINESTATE_FREE;
+        } else {
+                blk_to_erase = atomic_read(&line->blk_in_line);
+        }
+
         if (line->state != PBLK_LINESTATE_FREE) {
                 kfree(line->map_bitmap);
                 kfree(line->invalid_bitmap);
@@ -1119,15 +1213,12 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
 
         line->state = PBLK_LINESTATE_OPEN;
 
-        atomic_set(&line->left_eblks, blk_in_line);
-        atomic_set(&line->left_seblks, blk_in_line);
+        atomic_set(&line->left_eblks, blk_to_erase);
+        atomic_set(&line->left_seblks, blk_to_erase);
 
         line->meta_distance = lm->meta_distance;
         spin_unlock(&line->lock);
 
-        /* Bad blocks do not need to be erased */
-        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);
-
         kref_init(&line->ref);
 
         return 0;
@@ -1583,12 +1674,14 @@ static void pblk_line_should_sync_meta(struct pblk *pblk)
 
 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
 {
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
+        struct pblk_line_meta *lm = &pblk->lm;
         struct pblk_line_mgmt *l_mg = &pblk->l_mg;
         struct list_head *move_list;
+        int i;
 
 #ifdef CONFIG_NVM_DEBUG
-        struct pblk_line_meta *lm = &pblk->lm;
-
         WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
              "pblk: corrupt closed line %d\n", line->id);
 #endif
@@ -1610,6 +1703,15 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
         line->smeta = NULL;
         line->emeta = NULL;
 
+        for (i = 0; i < lm->blk_per_line; i++) {
+                struct pblk_lun *rlun = &pblk->luns[i];
+                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
+                int state = line->chks[pos].state;
+
+                if (!(state & NVM_CHK_ST_OFFLINE))
+                        state = NVM_CHK_ST_CLOSED;
+        }
+
         spin_unlock(&line->lock);
         spin_unlock(&l_mg->gc_lock);
 }
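
The two helpers added above, pblk_chunk_get_info() and pblk_chunk_get_off(), are consumed at initialization time in pblk-init.c, which is part of this commit but not shown here. Below is a minimal caller sketch within the pblk driver context (kernel headers implied); the function name and loop structure are hypothetical, and only the two pblk_chunk_* calls and the ownership rule (the caller frees the report) come from this diff:

/* Hypothetical caller sketch (illustrative, not from this diff): fetch the
 * report-chunk log once, then index into it for each chunk of a line.
 */
static int pblk_setup_line_chunks_sketch(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *meta, *chunk_meta;
        int i;

        meta = pblk_chunk_get_info(pblk);       /* one report for all chunks */
        if (IS_ERR(meta))
                return PTR_ERR(meta);

        for (i = 0; i < pblk->lm.blk_per_line; i++) {
                struct ppa_addr ppa = pblk->luns[i].bppa;
                int pos = pblk_ppa_to_pos(geo, ppa);

                ppa.m.chk = line->id;           /* chunk index within the LUN */
                chunk_meta = pblk_chunk_get_off(pblk, meta, ppa);
                line->chks[pos].state = chunk_meta->state;
        }

        kfree(meta);                            /* caller frees the report */
        return 0;
}

pblk_chunk_get_off() simply computes an index into the flat report buffer (group, then parallel unit, then chunk), which is why the caller can keep a single allocation from pblk_chunk_get_info() and look up each chunk's metadata in constant time.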
