@@ -1440,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
 	return err;
 }
 
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+	if (in_wl_tree(e, &ubi->scrub))
+		return false;
+	else if (in_wl_tree(e, &ubi->erroneous))
+		return false;
+	else if (ubi->move_from == e)
+		return false;
+	else if (ubi->move_to == e)
+		return false;
+
+	return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %EINVAL, PEB is out of range
+ * %ENOENT, PEB is no longer used by UBI
+ * %EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+	int err = 0;
+	struct ubi_wl_entry *e;
+
+	if (pnum < 0 || pnum >= ubi->peb_count) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Pause all parallel work, otherwise it can happen that the
+	 * erase worker frees a wl entry under us.
+	 */
+	down_write(&ubi->work_sem);
+
+	/*
+	 * Make sure that the wl entry does not change state while
+	 * inspecting it.
+	 */
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+	if (!e) {
+		spin_unlock(&ubi->wl_lock);
+		err = -ENOENT;
+		goto out_resume;
+	}
+
+	/*
+	 * Does it make sense to check this PEB?
+	 */
+	if (!scrub_possible(ubi, e)) {
+		spin_unlock(&ubi->wl_lock);
+		err = -EBUSY;
+		goto out_resume;
+	}
+	spin_unlock(&ubi->wl_lock);
+
+	if (!force) {
+		mutex_lock(&ubi->buf_mutex);
+		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+		mutex_unlock(&ubi->buf_mutex);
+	}
+
+	if (err == UBI_IO_BITFLIPS || force) {
+		/*
+		 * Okay, bit flip happened, let's figure out what we can do.
+		 */
+		spin_lock(&ubi->wl_lock);
+
+		/*
+		 * Recheck. We released wl_lock, UBI might have killed the
+		 * wl entry under us.
+		 */
+		e = ubi->lookuptbl[pnum];
+		if (!e) {
+			spin_unlock(&ubi->wl_lock);
+			err = -ENOENT;
+			goto out_resume;
+		}
+
+		/*
+		 * Need to re-check state
+		 */
+		if (!scrub_possible(ubi, e)) {
+			spin_unlock(&ubi->wl_lock);
+			err = -EBUSY;
+			goto out_resume;
+		}
+
+		if (in_pq(ubi, e)) {
+			prot_queue_del(ubi, e->pnum);
+			wl_tree_add(e, &ubi->scrub);
+			spin_unlock(&ubi->wl_lock);
+
+			err = ensure_wear_leveling(ubi, 1);
+		} else if (in_wl_tree(e, &ubi->used)) {
+			rb_erase(&e->u.rb, &ubi->used);
+			wl_tree_add(e, &ubi->scrub);
+			spin_unlock(&ubi->wl_lock);
+
+			err = ensure_wear_leveling(ubi, 1);
+		} else if (in_wl_tree(e, &ubi->free)) {
+			rb_erase(&e->u.rb, &ubi->free);
+			ubi->free_count--;
+			spin_unlock(&ubi->wl_lock);
+
+			/*
+			 * This PEB is empty, we can schedule it for
+			 * erasure right away. No wear leveling needed.
+			 */
+			err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+					     force ? 0 : 1, true);
+		} else {
+			spin_unlock(&ubi->wl_lock);
+			err = -EAGAIN;
+		}
+
+		if (!err && !force)
+			err = -EUCLEAN;
+	} else {
+		err = 0;
+	}
+
+out_resume:
+	up_write(&ubi->work_sem);
+out:
+
+	return err;
+}
+
 /**
  * tree_destroy - destroy an RB-tree.
  * @ubi: UBI device description object
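Below the hunk, a short caller sketch showing how the new interface might be consumed. This is an editor-added illustration, not part of the commit: the handler name dfs_scrub_peb_write and the simple_open() wiring are hypothetical stand-ins following common kernel debugfs patterns; only ubi_bitflip_check() itself comes from this patch.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include "ubi.h"	/* declares ubi_bitflip_check() once this patch is applied */

/*
 * Hypothetical debugfs write handler: userspace writes a PEB number and
 * UBI checks that eraseblock for bitflips, scheduling scrubbing if needed.
 */
static ssize_t dfs_scrub_peb_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	/* .open is assumed to be simple_open(), so private_data is the UBI device */
	struct ubi_device *ubi = file->private_data;
	int pnum, err;

	/* Parse the PEB number written by userspace */
	err = kstrtoint_from_user(buf, count, 10, &pnum);
	if (err)
		return err;

	/* Read the PEB and schedule scrubbing if bitflips were detected */
	err = ubi_bitflip_check(ubi, pnum, 0);
	if (err == 0 || err == -EUCLEAN)
		return count;	/* no bitflips, or PEB queued for scrubbing */

	return err;	/* -EINVAL, -ENOENT, -EBUSY or -EAGAIN, as documented above */
}

Writing a PEB number to such a file (for example under the existing /sys/kernel/debug/ubi/ubi0/ directory; the exact file name is not part of this hunk) would exercise the read-and-recheck path above, while calling ubi_bitflip_check() with force set would skip the read and schedule the PEB for scrubbing or erasure directly.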