Merged
2 changes: 1 addition & 1 deletion README.md

@@ -44,7 +44,7 @@ Once installed, an [Intro to Chia][link-intro] guide is available in the [Chia D
 [badge-commits]: https://img.shields.io/github/commit-activity/w/Chia-Network/chia-blockchain?logo=GitHub
 [badge-contributors]: https://img.shields.io/github/contributors/Chia-Network/chia-blockchain?logo=GitHub
 [badge-coverage]: https://img.shields.io/coverallsCoverage/github/Chia-Network/chia-blockchain?logo=Coveralls&logoColor=red&labelColor=%23212F39
-[badge-discord]: https://dcbadge.vercel.app/api/server/chia?style=flat-square&theme=full-presence
+[badge-discord]: https://img.shields.io/badge/discord-Chia%20Network-green.svg
 [badge-discord2]: https://img.shields.io/discord/1034523881404370984.svg?label=Discord&logo=discord&colorB=1e2b2f
 [badge-downloads]: https://img.shields.io/github/downloads/Chia-Network/chia-blockchain/total?logo=GitHub
 [badge-rc]: https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fdownload.chia.net%2Flatest%2Fbadge-data-rc.json&query=%24.message&logo=chianetwork&logoColor=white&label=Latest%20RC&labelColor=%230d3349&color=%23474748
2 changes: 1 addition & 1 deletion chia/_tests/core/mempool/test_singleton_fast_forward.py

@@ -663,4 +663,4 @@ async def test_double_spend_ff_spend_no_latest_unspent() -> None:
     status, error = await make_and_send_spend_bundle(sim, sim_client, [singleton_coin_spend], aggsig=sig)
     # It fails validation because it doesn't currently have a latest unspent
     assert status == MempoolInclusionStatus.FAILED
-    assert error == Err.DOUBLE_SPEND
+    assert error == Err.UNKNOWN_UNSPENT
11 changes: 8 additions & 3 deletions chia/full_node/mempool_manager.py

@@ -625,11 +625,16 @@ async def validate_spend_bundle(
         eligible_for_ff = bool(spend_conds.flags & ELIGIBLE_FOR_FF) and supports_fast_forward(coin_spend)
         if eligible_for_ff:
             # Make sure the fast forward spend still has a version that is
-            # still unspent, because if the singleton has been melted, the
-            # fast forward spend will never become valid.
+            # still unspent, because if the singleton has been spent in a
+            # non-FF spend, this fast forward spend will never become valid.
+            # So treat this as a normal spend, which requires the exact coin
+            # to exist and be unspent.
+            # Singletons that were created before the optimization of using
+            # spent_index will also fail this test, and such spends will
+            # fall back to being treated as non-FF spends.
             lineage_info = await get_unspent_lineage_info_for_puzzle_hash(spend_conds.puzzle_hash)
             if lineage_info is None:
-                return Err.DOUBLE_SPEND, None, []
+                eligible_for_ff = False

         spend_additions = []
         for puzzle_hash, amount, _ in spend_conds.create_coin:
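This hunk is the behavioral core of the PR: a fast-forward spend whose singleton has no known unspent version is no longer rejected outright with `Err.DOUBLE_SPEND`; it is demoted to a normal spend, and normal validation of a coin that does not exist fails with `Err.UNKNOWN_UNSPENT`, which is exactly what the updated test above now asserts. A minimal sketch of the decision, with hypothetical names (`resolve_ff_eligibility`, `lookup_unspent_lineage`) standing in for the real mempool plumbing:

```python
async def resolve_ff_eligibility(eligible_for_ff: bool, puzzle_hash: bytes, lookup_unspent_lineage) -> bool:
    """Decide whether a spend may still be treated as fast-forward (FF).

    lookup_unspent_lineage is an async callable mapping a puzzle hash to the
    lineage info of the singleton's latest unspent coin, or None when there is
    no unspent version (melted, or created before spent_index tracking).
    """
    if eligible_for_ff and await lookup_unspent_lineage(puzzle_hash) is None:
        # Fall back to normal validation: the exact coin being spent must
        # exist and be unspent, otherwise the spend fails (UNKNOWN_UNSPENT).
        return False
    return eligible_for_ff
```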
82 changes: 42 additions & 40 deletions chia/plotting/check_plots.py

@@ -5,7 +5,7 @@
 from collections import Counter
 from pathlib import Path
 from threading import Lock
-from time import sleep, time
+from time import monotonic, sleep
 from typing import Optional

 from chia_rs import G1Element
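Swapping `time()` for `monotonic()` in the timing code below is the safe choice for measuring durations: `time()` tracks the wall clock and can jump backwards or forwards (NTP adjustments, manual clock changes), while `monotonic()` only ever moves forward. A small self-contained illustration of the pattern the diff adopts (the `sleep` stands in for a real plot operation):

```python
from time import monotonic, sleep

start_ms = round(monotonic() * 1000)
sleep(0.05)  # stand-in for an expensive plot lookup
spent_ms = round(monotonic() * 1000) - start_ms
# A monotonic delta is guaranteed non-negative; a time()-based delta is not.
print(f"Operation took: {spent_ms} ms")
```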
Expand Down Expand Up @@ -172,45 +172,16 @@ def process_plot(plot_path: Path, plot_info: PlotInfo, num_start: int, num_end:
challenge = std_hash(i.to_bytes(32, "big"))
# Some plot errors cause get_qualities_for_challenge to throw a RuntimeError
try:
quality_start_time = round(time() * 1000)
for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
quality_spent_time = round(time() * 1000) - quality_start_time
if quality_spent_time > 8000:
log.warning(
f"\tLooking up qualities took: {quality_spent_time} ms. This should be below 8 seconds "
f"to minimize risk of losing rewards. Filepath: {plot_path}"
)
else:
log.info(f"\tLooking up qualities took: {quality_spent_time} ms. Filepath: {plot_path}")

# Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
try:
proof_start_time = round(time() * 1000)
# TODO : todo_v2_plots handle v2 plots
proof = pr.get_full_proof(challenge, index, parallel_read)
proof_spent_time = round(time() * 1000) - proof_start_time
if proof_spent_time > 15000:
log.warning(
f"\tFinding proof took: {proof_spent_time} ms. This should be below 15 seconds "
f"to minimize risk of losing rewards. Filepath: {plot_path}"
)
else:
log.info(f"\tFinding proof took: {proof_spent_time} ms. Filepath: {plot_path}")

ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
if quality_str == ver_quality_str:
total_proofs += 1
else:
log.warning(
f"\tQuality doesn't match with proof. Filepath: {plot_path} "
"This can occasionally happen with a compressed plot."
)
except AssertionError as e:
log.error(
f"{type(e)}: {e} error in proving/verifying for plot {plot_path}. Filepath: {plot_path}"
)
caught_exception = True
quality_start_time = round(time() * 1000)
quality_start_time = round(monotonic() * 1000)
qualities = pr.get_qualities_for_challenge(challenge)
quality_spent_time = round(monotonic() * 1000) - quality_start_time
if quality_spent_time > 8000:
log.warning(
f"\tLooking up qualities took: {quality_spent_time} ms. This should be below 8 seconds "
f"to minimize risk of losing rewards. Filepath: {plot_path}"
)
else:
log.info(f"\tLooking up qualities took: {quality_spent_time} ms. Filepath: {plot_path}")
except KeyboardInterrupt:
log.warning("Interrupted, closing")
return
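The warn-or-info logging around each timed call appears twice in this file (qualities above, proofs below). If the pattern kept growing, a tiny helper could express it once; a sketch of that idea (hypothetical, not part of this PR):

```python
import logging
from time import monotonic

log = logging.getLogger(__name__)

def timed_call(label: str, budget_ms: int, fn, *args):
    """Run fn(*args); warn if it exceeded budget_ms, otherwise log at info."""
    start_ms = round(monotonic() * 1000)
    result = fn(*args)
    spent_ms = round(monotonic() * 1000) - start_ms
    if spent_ms > budget_ms:
        log.warning(f"{label} took: {spent_ms} ms (budget: {budget_ms} ms)")
    else:
        log.info(f"{label} took: {spent_ms} ms")
    return result
```

With it, the qualities lookup above would read `qualities = timed_call("Looking up qualities", 8000, pr.get_qualities_for_challenge, challenge)`.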
@@ -224,9 +195,40 @@ def process_plot(plot_path: Path, plot_info: PlotInfo, num_start: int, num_end:
             else:
                 log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
                 caught_exception = True
+                continue
         except Exception as e:
             log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
             caught_exception = True
             break

+        for index, quality_str in enumerate(qualities):
+            # Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
+            try:
+                proof_start_time = round(monotonic() * 1000)
+                # TODO : todo_v2_plots handle v2 plots
+                proof = pr.get_full_proof(challenge, index, parallel_read)
+                proof_spent_time = round(monotonic() * 1000) - proof_start_time
+                if proof_spent_time > 15000:
+                    log.warning(
+                        f"\tFinding proof took: {proof_spent_time} ms. This should be below 15 seconds "
+                        f"to minimize risk of losing rewards. Filepath: {plot_path}"
+                    )
+                else:
+                    log.info(f"\tFinding proof took: {proof_spent_time} ms. Filepath: {plot_path}")
+
+                ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size().size_v1, challenge, proof)
+                if quality_str == ver_quality_str:
+                    total_proofs += 1
+                else:
+                    log.warning(
+                        f"\tQuality doesn't match with proof. Filepath: {plot_path} "
+                        "This can occasionally happen with a compressed plot."
+                    )
+            except AssertionError as e:
+                log.error(
+                    f"{type(e)}: {e} error in proving/verifying for plot {plot_path}. Filepath: {plot_path}"
+                )
+                caught_exception = True
+            if caught_exception is True:
+                break

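Structurally, this refactor splits what used to be one nested loop into two phases: first materialize the qualities for a challenge (timed as a single lookup), then fetch and verify a proof per quality, stopping at the first recorded failure. The `continue` added in the error branch serves the same structure: once the qualities lookup has failed, `qualities` is not available, so the per-quality proof phase for that challenge must be skipped. A compact sketch of the resulting control flow, with the prover calls abstracted into callables (names hypothetical):

```python
def check_challenge(challenge: bytes, get_qualities, get_proof, validate) -> int:
    """Two-phase plot check: collect qualities, then prove and verify each."""
    total_proofs = 0
    try:
        qualities = get_qualities(challenge)  # phase 1: one timed lookup
    except RuntimeError:
        return total_proofs  # plot error: nothing to verify for this challenge
    for index, quality in enumerate(qualities):  # phase 2: per-quality proofs
        try:
            proof = get_proof(challenge, index)
            if validate(challenge, proof) == quality:
                total_proofs += 1
        except AssertionError:
            break  # as in the diff: stop proving after the first failure
    return total_proofs
```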
30 changes: 14 additions & 16 deletions chia/plotting/manager.py

@@ -333,22 +333,20 @@ def process_file(file_path: Path) -> Optional[PlotInfo]:
             # TODO: consider checking if the file was just written to (which would mean that the file is still
             # being copied). A segfault might happen in this edge case.

-            version_and_size = prover.get_size()
-            if version_and_size.size_v1 is not None:
-                k = version_and_size.size_v1
-                level = prover.get_compression_level()
-                if level == 0:
-                    if k >= 30 and stat_info.st_size < 0.98 * expected_size:
-                        log.warning(
-                            f"Not farming plot {file_path}. "
-                            f"Size is {stat_info.st_size / (1024**3)} GiB, "
-                            f"but expected at least: {expected_size / (1024**3)} GiB. "
-                            "We assume the file is being copied."
-                        )
-                        return None
-            else:
-                # TODO: todo_v2_plots do we need to check v2 plots?
-                pass
+            k = prover.get_size()
+            level = prover.get_compression_level()
+            if (
+                level == 0
+                and stat_info.st_size < 0.98 * expected_size
+                and ((k.size_v1 is not None and k.size_v1 >= 30) or (k.size_v2 is not None and k.size_v2 >= 28))
+            ):
+                log.warning(
+                    f"Not farming plot {file_path}. "
+                    f"Size is {stat_info.st_size / (1024**3)} GiB, "
+                    f"but expected at least: {expected_size / (1024**3)} GiB. "
+                    "We assume the file is being copied."
+                )
+                return None

             cache_entry = CacheEntry.from_prover(prover)
             self.cache.update(file_path, cache_entry)
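The rewrite collapses the v1-only branch into a single guard that also covers v2 plots: an uncompressed plot whose on-disk size is more than 2% below the expected size is assumed to be a file still being copied and is skipped. A standalone sketch of that predicate (function name hypothetical; the k thresholds of 30 for v1 and 28 for v2 come from the diff):

```python
from typing import Optional

def probably_being_copied(
    actual_size: int,
    expected_size: int,
    compression_level: int,
    size_v1: Optional[int],
    size_v2: Optional[int],
) -> bool:
    """True when an uncompressed plot looks like an in-progress copy."""
    if compression_level != 0:
        # Compressed plots have different expected sizes; skip the heuristic.
        return False
    large_enough_k = (size_v1 is not None and size_v1 >= 30) or (
        size_v2 is not None and size_v2 >= 28
    )
    return large_enough_k and actual_size < 0.98 * expected_size
```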