@@ -277,6 +277,27 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
 	return 0;
 }
 
+/**
+ * in_pq - check if a wear-leveling entry is present in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns non-zero if @e is in the protection queue and zero
+ * if it is not.
+ */
+static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+	struct ubi_wl_entry *p;
+	int i;
+
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+		list_for_each_entry(p, &ubi->pq[i], u.list)
+			if (p == e)
+				return 1;
+
+	return 0;
+}
+
 /**
  * prot_queue_add - add physical eraseblock to the protection queue.
  * @ubi: UBI device description object
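The new in_pq() helper walks all UBI_PROT_QUEUE_LEN buckets of the protection queue, so the lookup is linear in the number of protected PEBs. As a minimal sketch (not part of this patch; the helper name is hypothetical and the caller would have to hold ubi->wl_lock), in_pq() combines with the existing in_wl_tree() to classify where a wl entry currently lives:

/* Hypothetical debugging helper, for illustration only.
 * Must be called with ubi->wl_lock held so the entry cannot move.
 */
static const char *wl_entry_location(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	if (in_wl_tree(e, &ubi->used))
		return "used tree";
	if (in_wl_tree(e, &ubi->free))
		return "free tree";
	if (in_wl_tree(e, &ubi->scrub))
		return "scrub tree";
	if (in_wl_tree(e, &ubi->erroneous))
		return "erroneous tree";
	if (in_pq(ubi, e))
		return "protection queue";
	return "in flight (e.g. being moved by wear-leveling)";
}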
@@ -1419,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
 	return err;
 }
 
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+	if (in_wl_tree(e, &ubi->scrub))
+		return false;
+	else if (in_wl_tree(e, &ubi->erroneous))
+		return false;
+	else if (ubi->move_from == e)
+		return false;
+	else if (ubi->move_to == e)
+		return false;
+
+	return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %EINVAL, PEB is out of range
+ * %ENOENT, PEB is no longer used by UBI
+ * %EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+	int err;
+	struct ubi_wl_entry *e;
+
+	if (pnum < 0 || pnum >= ubi->peb_count) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Pause all parallel work, otherwise it can happen that the
+	 * erase worker frees a wl entry under us.
+	 */
+	down_write(&ubi->work_sem);
+
+	/*
+	 * Make sure that the wl entry does not change state while
+	 * inspecting it.
+	 */
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+	if (!e) {
+		spin_unlock(&ubi->wl_lock);
+		err = -ENOENT;
+		goto out_resume;
+	}
+
+	/*
+	 * Does it make sense to check this PEB?
+	 */
+	if (!scrub_possible(ubi, e)) {
+		spin_unlock(&ubi->wl_lock);
+		err = -EBUSY;
+		goto out_resume;
+	}
+	spin_unlock(&ubi->wl_lock);
+
+	if (!force) {
+		mutex_lock(&ubi->buf_mutex);
+		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+		mutex_unlock(&ubi->buf_mutex);
+	}
+
+	if (force || err == UBI_IO_BITFLIPS) {
+		/*
+		 * Okay, bit flips happened, let's figure out what we can do.
+		 */
+		spin_lock(&ubi->wl_lock);
+
+		/*
+		 * Recheck. We released wl_lock, UBI might have killed the
+		 * wl entry under us.
+		 */
+		e = ubi->lookuptbl[pnum];
+		if (!e) {
+			spin_unlock(&ubi->wl_lock);
+			err = -ENOENT;
+			goto out_resume;
+		}
+
+		/*
+		 * Need to re-check state
+		 */
+		if (!scrub_possible(ubi, e)) {
+			spin_unlock(&ubi->wl_lock);
+			err = -EBUSY;
+			goto out_resume;
+		}
+
+		if (in_pq(ubi, e)) {
+			prot_queue_del(ubi, e->pnum);
+			wl_tree_add(e, &ubi->scrub);
+			spin_unlock(&ubi->wl_lock);
+
+			err = ensure_wear_leveling(ubi, 1);
+		} else if (in_wl_tree(e, &ubi->used)) {
+			rb_erase(&e->u.rb, &ubi->used);
+			wl_tree_add(e, &ubi->scrub);
+			spin_unlock(&ubi->wl_lock);
+
+			err = ensure_wear_leveling(ubi, 1);
+		} else if (in_wl_tree(e, &ubi->free)) {
+			rb_erase(&e->u.rb, &ubi->free);
+			ubi->free_count--;
+			spin_unlock(&ubi->wl_lock);
+
+			/*
+			 * This PEB is empty, we can schedule it for
+			 * erasure right away. No wear leveling needed.
+			 */
+			err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+					     force ? 0 : 1, true);
+		} else {
+			spin_unlock(&ubi->wl_lock);
+			err = -EAGAIN;
+		}
+
+		if (!err && !force)
+			err = -EUCLEAN;
+	} else {
+		err = 0;
+	}
+
+out_resume:
+	up_write(&ubi->work_sem);
+out:
+
+	return err;
+}
+
 /**
  * tree_destroy - destroy an RB-tree.
  * @ubi: UBI device description object
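ubi_bitflip_check() is self-contained: it pauses background work, validates the PEB against the lookup table, and only then decides between scrubbing (used tree or protection queue) and direct erasure (free tree). As a hedged sketch of how a caller might drive it, the hypothetical helper below (not part of this patch) scans a whole device relying only on the return codes documented above:

/*
 * Hypothetical caller, for illustration only: read-check every PEB of
 * a UBI device and count how many got scheduled for scrubbing.
 */
static int ubi_check_all_pebs(struct ubi_device *ubi)
{
	int pnum, err, flipped = 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		err = ubi_bitflip_check(ubi, pnum, 0);
		switch (err) {
		case -EUCLEAN:	/* bitflips found, PEB queued for scrubbing */
			flipped++;
			break;
		case 0:		/* PEB read back clean */
		case -ENOENT:	/* PEB not used by UBI */
		case -EBUSY:	/* cannot be checked right now, skip it */
		case -EAGAIN:	/* flips seen, but scrubbing not possible now */
			break;
		default:	/* -EINVAL: PEB number out of range */
			return err;
		}
	}

	return flipped;
}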
@@ -1848,16 +2013,11 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
 static int self_check_in_pq(const struct ubi_device *ubi,
 			    struct ubi_wl_entry *e)
 {
-	struct ubi_wl_entry *p;
-	int i;
-
 	if (!ubi_dbg_chk_gen(ubi))
 		return 0;
 
-	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
-		list_for_each_entry(p, &ubi->pq[i], u.list)
-			if (p == e)
-				return 0;
+	if (in_pq(ubi, e))
+		return 0;
 
 	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
 		e->pnum, e->ec);
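With the open-coded queue walk replaced by in_pq(), self_check_in_pq() and the new bitrot interface share one lookup path. Assuming a companion patch in this series exposes ubi_bitflip_check() to user space via an ioctl on the UBI character device (the UBI_IOCRPEB name and its int argument are assumptions here, not shown in this diff), exercising the check could look like:

/* Userspace sketch; UBI_IOCRPEB and its argument type are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int main(int argc, char **argv)
{
	int fd, pnum;

	if (argc != 3) {
		fprintf(stderr, "usage: %s /dev/ubiX <pnum>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	pnum = atoi(argv[2]);
	if (ioctl(fd, UBI_IOCRPEB, &pnum) < 0)	/* read-check PEB @pnum */
		perror("UBI_IOCRPEB");

	close(fd);
	return 0;
}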