diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 10b54ce9f..576dbcbea 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -170,7 +170,13 @@ jobs: ## Long-running tests. Put these first to limit the overall runtime of the ## test suite ar_coordinated_mining_tests, - ar_data_sync_tests, + ar_data_sync_recovers_from_corruption_test, + ar_data_sync_syncs_data_test, + ar_data_sync_syncs_after_joining_test, + ar_data_sync_mines_off_only_last_chunks_test, + ar_data_sync_mines_off_only_second_last_chunks_test, + ar_data_sync_disk_pool_rotation_test, + ar_data_sync_enqueue_intervals_test, ar_fork_recovery_tests, ar_tx, ar_packing_tests, @@ -251,6 +257,7 @@ jobs: ar_node, ar_node_utils, ar_nonce_limiter, + ar_node_worker, # ar_p3, # ar_p3_config, # ar_p3_db, diff --git a/apps/arweave/src/ar_block.erl b/apps/arweave/src/ar_block.erl index 4a7325335..965f0e585 100644 --- a/apps/arweave/src/ar_block.erl +++ b/apps/arweave/src/ar_block.erl @@ -21,7 +21,7 @@ get_max_nonce/1, get_recall_range_size/1, get_recall_byte/3, get_sub_chunk_size/1, get_nonces_per_chunk/1, get_nonces_per_recall_range/1, get_sub_chunk_index/2, - get_chunk_padded_offset/1]). + get_chunk_padded_offset/1, get_double_signing_condition/4]). -include("../include/ar.hrl"). -include("../include/ar_consensus.hrl"). @@ -666,7 +666,16 @@ get_chunk_padded_offset(Offset) -> Offset end. - +%% @doc Return true if the given cumulative difficulty - previous cumulative difficulty +%% pairs satisfy the double signing condition. +-spec get_double_signing_condition( + CDiff1 :: non_neg_integer(), + PrevCDiff1 :: non_neg_integer(), + CDiff2 :: non_neg_integer(), + PrevCDiff2 :: non_neg_integer() +) -> boolean(). +get_double_signing_condition(CDiff1, PrevCDiff1, CDiff2, PrevCDiff2) -> + CDiff1 == CDiff2 orelse (CDiff1 > PrevCDiff2 andalso CDiff2 > PrevCDiff1). %%%=================================================================== %%% Private functions. diff --git a/apps/arweave/src/ar_block_cache.erl b/apps/arweave/src/ar_block_cache.erl index 3779fa94f..16a9fe5d8 100644 --- a/apps/arweave/src/ar_block_cache.erl +++ b/apps/arweave/src/ar_block_cache.erl @@ -8,7 +8,8 @@ get_longest_chain_cache/1, get_block_and_status/2, remove/2, get_checkpoint_block/1, prune/2, get_by_solution_hash/5, is_known_solution_hash/2, - get_siblings/2, get_fork_blocks/2, update_timestamp/3]). + get_siblings/2, get_fork_blocks/2, update_timestamp/3, + get_validated_front/1, get_blocks_by_miner/2]). -include_lib("arweave/include/ar.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -480,6 +481,36 @@ update_timestamp(Tab, H, ReceiveTimestamp) -> end end. +%% @doc Return the list of top validated blocks of the longest chains (the largest +%% cumulative difficulty blocks). +get_validated_front(Tab) -> + case ets:lookup(Tab, links) of + [] -> + []; + [{_, Set}] -> + get_validated_front(Tab, Set) + end. + +%% @doc Return all blocks from the cache mined by the given address. +get_blocks_by_miner(Tab, MinerAddr) -> + case ets:lookup(Tab, links) of + [{links, Set}] -> + gb_sets:fold( + fun({_Height, H}, Acc) -> + case ets:lookup(Tab, {block, H}) of + [{_, {B, _Status, _Timestamp, _Children}}] when B#block.reward_addr == MinerAddr -> + [B | Acc]; + _ -> + Acc + end + end, + [], + Set + ); + _ -> + [] + end. + %%%=================================================================== %%% Private functions. %%%=================================================================== @@ -720,6 +751,31 @@ update_longest_chain_cache(Tab) -> end, Result. 
+get_validated_front(Tab, Set) -> + {_MaxCDiff, Blocks} = gb_sets:fold( + fun({_Height, H}, {CurrentMaxCDiff, CurrentBlocks}) -> + case ets:lookup(Tab, {block, H}) of + [{_, {B, Status, _Timestamp, _Children}}] + when Status == validated; + Status == on_chain -> + CDiff = B#block.cumulative_diff, + case CDiff > CurrentMaxCDiff of + true -> + {CDiff, [B]}; + false when CDiff == CurrentMaxCDiff -> + {CurrentMaxCDiff, [B | CurrentBlocks]}; + false -> + {CurrentMaxCDiff, CurrentBlocks} + end; + _ -> + {CurrentMaxCDiff, CurrentBlocks} + end + end, + {0, []}, + Set + ), + Blocks. + %%%=================================================================== %%% Tests. %%%=================================================================== @@ -1760,3 +1816,196 @@ block_id(#block{ indep_hash = H }) -> on_top(B, PrevB) -> B#block{ previous_block = PrevB#block.indep_hash, height = PrevB#block.height + 1, previous_cumulative_diff = PrevB#block.cumulative_diff }. + +get_validated_front_test() -> + ets:new(bcache_test, [set, named_table]), + + %% Test empty cache + %% + %% Height Block/Status + %% + %% (empty) + ?assertEqual([], get_validated_front(bcache_test)), + + %% Test with a single on-chain block + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + new(bcache_test, B1 = random_block(0)), + ?assertEqual([B1], get_validated_front(bcache_test)), + + %% Test with a single validated block + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + add_validated(bcache_test, B2 = on_top(random_block(1), B1)), + ?assertEqual([B2], get_validated_front(bcache_test)), + + %% Test with multiple validated blocks at different heights + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + add_validated(bcache_test, B3 = on_top(random_block(2), B2)), + add_validated(bcache_test, B4 = on_top(random_block(3), B3)), + ?assertEqual([B4], get_validated_front(bcache_test)), + + %% Test with multiple blocks at the same highest cumulative difficulty + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + %% 3 B5/validated (cdiff=3) + add_validated(bcache_test, B5 = on_top(random_block(3), B2)), + ?assertEqual(lists:sort([B4, B5]), + lists:sort(get_validated_front(bcache_test))), + + %% Test with non-validated blocks having higher cumulative difficulty + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + %% 3 B5/validated (cdiff=3) + %% 4 B6/not_validated (cdiff=4) + add(bcache_test, B6 = on_top(random_block(4), B4)), + ?assertEqual(lists:sort([B4, B5]), + lists:sort(get_validated_front(bcache_test))), + + %% Test validating a block with higher cumulative difficulty + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + %% 3 B5/validated (cdiff=3) + %% 4 B6/validated (cdiff=4) + add_validated(bcache_test, B6), + ?assertEqual([B6], get_validated_front(bcache_test)), + + %% Test with multiple forks at the same height and cumulative difficulty + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + %% 3 B5/validated (cdiff=3) + %% 4 B6/validated (cdiff=4) + %% 4 B7/validated (cdiff=4) + %% 4 B8/validated (cdiff=4) + add_validated(bcache_test, B7 = 
on_top(random_block(4), B3)), + add_validated(bcache_test, B8 = on_top(random_block(4), B5)), + ?assertEqual(lists:sort([B6, B7, B8]), + lists:sort(get_validated_front(bcache_test))), + + %% Test with a mix of validated and non-validated blocks at different heights + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + %% 3 B5/validated (cdiff=3) + %% 4 B6/validated (cdiff=4) + %% 4 B7/validated (cdiff=4) + %% 4 B8/validated (cdiff=4) + %% 5 B9/not_validated (cdiff=5) + %% 5 B10/not_validated (cdiff=5) + add(bcache_test, B9 = on_top(random_block(5), B6)), + add(bcache_test, B10 = on_top(random_block(5), B7)), + ?assertEqual(lists:sort([B6, B7, B8]), + lists:sort(get_validated_front(bcache_test))), + + %% Test validating blocks with higher cumulative difficulty + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + %% 3 B5/validated (cdiff=3) + %% 4 B6/validated (cdiff=4) + %% 4 B7/validated (cdiff=4) + %% 4 B8/validated (cdiff=4) + %% 5 B9/validated (cdiff=5) + %% 5 B10/validated (cdiff=5) + add_validated(bcache_test, B9), + add_validated(bcache_test, B10), + ?assertEqual(lists:sort([B9, B10]), + lists:sort(get_validated_front(bcache_test))), + + %% Test with on_chain blocks at the highest cumulative difficulty + %% + %% Height Block/Status + %% + %% 0 B1/on_chain + %% 1 B2/validated (cdiff=1) + %% 2 B3/validated (cdiff=2) + %% 3 B4/validated (cdiff=3) + %% 3 B5/validated (cdiff=3) + %% 4 B6/validated (cdiff=4) + %% 4 B7/validated (cdiff=4) + %% 4 B8/validated (cdiff=4) + %% 5 B9/validated (cdiff=5) + %% 5 B10/validated (cdiff=5) + %% 6 B11/validated (cdiff=6) + %% 6 B12/on_chain (cdiff=7) + add(bcache_test, B11 = on_top(random_block(6), B9)), + mark_tip(bcache_test, block_id(B11)), + ?assertEqual([B11], get_validated_front(bcache_test)), + add(bcache_test, B12 = on_top(random_block(6), B9)), + mark_tip(bcache_test, block_id(B12)), + ?assertEqual(lists:sort([B11, B12]), + lists:sort(get_validated_front(bcache_test))), + + ets:delete(bcache_test). + +%% @doc Test that get_blocks_by_miner returns the correct blocks for a given miner. +get_blocks_by_miner_test() -> + ets:new(bcache_test, [set, named_table]), + new(bcache_test, B0 = random_block(0)), + Tab = bcache_test, + ?assertEqual([], get_blocks_by_miner(Tab, <<"miner1">>)), + % Create some test blocks + B1 = #block{ indep_hash = <<"hash1">>, reward_addr = <<"miner1">> }, + B2 = #block{ indep_hash = <<"hash2">>, reward_addr = <<"miner2">> }, + B3 = #block{ indep_hash = <<"hash3">>, reward_addr = <<"miner1">> }, + % Add blocks to cache + add(Tab, on_top(B1, B0)), + add(Tab, on_top(B2, B0)), + add(Tab, on_top(B3, B0)), + B1_1 = B1#block{ + height = 1, + previous_block = B0#block.indep_hash, + previous_cumulative_diff = B0#block.cumulative_diff }, + B2_1 = B2#block{ + height = 1, + previous_block = B0#block.indep_hash, + previous_cumulative_diff = B0#block.cumulative_diff }, + B3_1 = B3#block{ + height = 1, + previous_block = B0#block.indep_hash, + previous_cumulative_diff = B0#block.cumulative_diff }, + + % Test getting blocks by miner + ?assertEqual([B1_1, B3_1], lists:sort(fun(A, B) -> A#block.indep_hash < B#block.indep_hash end, get_blocks_by_miner(Tab, <<"miner1">>))), + ?assertEqual([B2_1], get_blocks_by_miner(Tab, <<"miner2">>)), + ?assertEqual([], get_blocks_by_miner(Tab, <<"miner3">>)), + ets:delete(Tab). 
\ No newline at end of file diff --git a/apps/arweave/src/ar_chain_stats.erl b/apps/arweave/src/ar_chain_stats.erl index 6033d583c..9b1d1f771 100644 --- a/apps/arweave/src/ar_chain_stats.erl +++ b/apps/arweave/src/ar_chain_stats.erl @@ -95,14 +95,22 @@ record_fork_depth([], _ForkRootB, N) -> prometheus_histogram:observe(fork_recovery_depth, N), ok; record_fork_depth([H | Orphans], ForkRootB, N) -> - ?LOG_INFO([ + SolutionHashInfo = + case ar_block_cache:get(block_cache, H) of + not_found -> + %% Should never happen, by construction. + ?LOG_ERROR([{event, block_not_found_in_cache}, {h, ar_util:encode(H)}]), + []; + #block{ hash = SolutionH } -> + [{solution_hash, ar_util:encode(SolutionH)}] + end, + LogInfo = [ {event, orphaning_block}, {block, ar_util:encode(H)}, {depth, N}, {fork_root, ar_util:encode(ForkRootB#block.indep_hash)}, - {fork_height, ForkRootB#block.height + 1} - ]), + {fork_height, ForkRootB#block.height + 1} | SolutionHashInfo], + ?LOG_INFO(LogInfo), record_fork_depth(Orphans, ForkRootB, N + 1). - %%%=================================================================== %%% Tests. %%%=================================================================== diff --git a/apps/arweave/src/ar_mining_cache.erl b/apps/arweave/src/ar_mining_cache.erl index 2d1ba72ec..bf55f4d4b 100644 --- a/apps/arweave/src/ar_mining_cache.erl +++ b/apps/arweave/src/ar_mining_cache.erl @@ -146,13 +146,19 @@ release_for_session(SessionId, Size, Cache0) -> -spec drop_session(SessionId :: term(), Cache0 :: #ar_mining_cache{}) -> Cache1 :: #ar_mining_cache{}. drop_session(SessionId, Cache0) -> - Cache0#ar_mining_cache{ - mining_cache_sessions = maps:remove(SessionId, Cache0#ar_mining_cache.mining_cache_sessions), - mining_cache_sessions_queue = queue:filter( - fun(SessionId0) -> SessionId0 =/= SessionId end, - Cache0#ar_mining_cache.mining_cache_sessions_queue - ) - }. + ?LOG_DEBUG([{event, drop_session}, {session_id, SessionId}]), + case maps:take(SessionId, Cache0#ar_mining_cache.mining_cache_sessions) of + {Session, Sessions} -> + maybe_search_for_anomalies(SessionId, Session), + Cache0#ar_mining_cache{ + mining_cache_sessions = Sessions, + mining_cache_sessions_queue = queue:filter( + fun(SessionId0) -> SessionId0 =/= SessionId end, + Cache0#ar_mining_cache.mining_cache_sessions_queue + ) + }; + _ -> Cache0 + end. %% @doc Checks if a session exists in the cache. -spec session_exists(SessionId :: term(), Cache0 :: #ar_mining_cache{}) -> @@ -286,6 +292,132 @@ with_mining_cache_session(SessionId, Fun, Cache0) -> {error, session_not_found} end. +%% Searches for anomalies in the mining cache session. +%% If the actual cache size is different from the expected cache size, +%% it will log a warning. +%% If the reserved cache size is different from 0, it will log a warning. +%% It will also search for invalid cache values, e.g. missing chunks, or failed +%% invariants. +%% +%% Perhaps it is a good idea to put this under a config flag, disabled by default. 
+maybe_search_for_anomalies(SessionId, #ar_mining_cache_session{ + mining_cache = MiningCache, + mining_cache_size_bytes = MiningCacheSize, + reserved_mining_cache_bytes = ReservedMiningCacheBytes +}) -> + ActualCacheSize = maybe_search_for_anomalies_cache_values(SessionId, MiningCache), + case {ActualCacheSize, MiningCacheSize} of + {0, 0} -> ok; + {EqualSize, EqualSize} -> ?LOG_WARNING([ + {event, mining_cache_anomaly}, {anomaly, cache_size_non_zero}, + {session_id, SessionId}, {actual_size, ActualCacheSize}, {reported_size, MiningCacheSize}]); + {_, _} -> ?LOG_WARNING([ + {event, mining_cache_anomaly}, {anomaly, cache_size_mismatch}, + {session_id, SessionId}, {actual_size, ActualCacheSize}, {reported_size, MiningCacheSize}]) + end, + case ReservedMiningCacheBytes of + 0 -> ok; + _ -> ?LOG_WARNING([ + {event, mining_cache_anomaly}, {anomaly, reserved_size_non_zero}, + {session_id, SessionId}, {actual_size, ReservedMiningCacheBytes}, {expected_size, 0}]) + end, + ?LOG_DEBUG([{event, mining_cache_anomaly}, {anomaly, mining_cache_anomaly_search_completed}, {session_id, SessionId}]); +maybe_search_for_anomalies(SessionId, _InvalidSession) -> + ?LOG_ERROR([{event, mining_cache_anomaly}, {anomaly, invalid_session_type}, {session_id, SessionId}]), + ok. + +maybe_search_for_anomalies_cache_values(SessionId, MiningCache) when is_map(MiningCache) -> + OuterAcc0 = {_Anomalies = #{}, _ActualSize = 0}, + {Anomalies, ActualSize} = maps:fold(fun(Key, Value, {Anomalies0, ActualSize0}) -> + Anomalies1 = lists:foldl(fun(Check, Anomalies) -> Check({Key, Value}, Anomalies) end, Anomalies0, [ + fun maybe_search_for_anomalies_cache_values_chunk1_missing/2, + fun maybe_search_for_anomalies_cache_values_chunk1_stale/2, + fun maybe_search_for_anomalies_cache_values_chunk2_missing/2, + fun maybe_search_for_anomalies_cache_values_chunk2_stale/2, + fun maybe_search_for_anomalies_cache_values_h1_missing/2, + fun maybe_search_for_anomalies_cache_values_h2_missing/2, + fun maybe_search_for_anomalies_cache_values_h1_passes_diff_checks_present/2 + ]), + {Anomalies1, ActualSize0 + cached_value_size(Value)} + end, OuterAcc0, MiningCache), + case maps:size(Anomalies) > 0 of + true -> ?LOG_WARNING([ + {event, mining_cache_anomaly}, {anomaly, cached_values_anomalies}, + {anomalies, Anomalies}, {session_id, SessionId}]); + false -> ok + end, + ActualSize; +maybe_search_for_anomalies_cache_values(SessionId, _InvalidCache) -> + ?LOG_ERROR([{event, mining_cache_anomaly}, {anomaly, invalid_cache_type}, {session_id, SessionId}]), + 0. + +maybe_search_for_anomalies_cache_values_chunk1_missing({ + Key, + #ar_mining_cache_value{chunk1 = undefined, chunk1_missing = false} = Value +}, Anomalies) -> + maps:update_with(chunk1_missing, fun(V) -> V + 1 end, 1, + maps:update_with(chunk1_missing_sample, fun(V) -> V end, {Key, Value}, Anomalies)); +maybe_search_for_anomalies_cache_values_chunk1_missing({_, _}, Anomalies) -> + Anomalies. + +maybe_search_for_anomalies_cache_values_chunk1_stale({ + Key, + #ar_mining_cache_value{chunk1 = Chunk1, chunk1_missing = true} = Value +}, Anomalies) when undefined =/= Chunk1 -> + maps:update_with(chunk1_stale, fun(V) -> V + 1 end, 1, + maps:update_with(chunk1_stale_sample, fun(V) -> V end, {Key, Value}, Anomalies)); +maybe_search_for_anomalies_cache_values_chunk1_stale({_, _}, Anomalies) -> + Anomalies. 
+ +maybe_search_for_anomalies_cache_values_chunk2_missing({ + Key, + #ar_mining_cache_value{chunk2 = undefined, chunk2_missing = false} = Value +}, Anomalies) -> + maps:update_with(chunk2_missing, fun(V) -> V + 1 end, 1, + maps:update_with(chunk2_missing_sample, fun(V) -> V end, {Key, Value}, Anomalies)); +maybe_search_for_anomalies_cache_values_chunk2_missing({_, _}, Anomalies) -> + Anomalies. + +maybe_search_for_anomalies_cache_values_chunk2_stale({ + Key, + #ar_mining_cache_value{chunk2 = Chunk2, chunk2_missing = true} = Value +}, Anomalies) when undefined =/= Chunk2 -> + maps:update_with(chunk2_stale, fun(V) -> V + 1 end, 1, + maps:update_with(chunk2_stale_sample, fun(V) -> V end, {Key, Value}, Anomalies)); +maybe_search_for_anomalies_cache_values_chunk2_stale({_, _}, Anomalies) -> + Anomalies. + +maybe_search_for_anomalies_cache_values_h1_missing({ + Key, + #ar_mining_cache_value{h1 = undefined, chunk1 = Chunk1} = Value +}, Anomalies) +when undefined =/= Chunk1 -> + maps:update_with(h1_missing, fun(V) -> V + 1 end, 1, + maps:update_with(h1_missing_sample, fun(V) -> V end, {Key, Value}, Anomalies)); +maybe_search_for_anomalies_cache_values_h1_missing({_, _}, Anomalies) -> + Anomalies. + +maybe_search_for_anomalies_cache_values_h2_missing({ + Key, + #ar_mining_cache_value{h2 = undefined, chunk2 = Chunk2} = Value +}, Anomalies) +when undefined =/= Chunk2 -> + maps:update_with(h2_missing, fun(V) -> V + 1 end, 1, + maps:update_with(h2_missing_sample, fun(V) -> V end, {Key, Value}, Anomalies)); +maybe_search_for_anomalies_cache_values_h2_missing({_, _}, Anomalies) -> + Anomalies. + +maybe_search_for_anomalies_cache_values_h1_passes_diff_checks_present({ + Key, + #ar_mining_cache_value{h1_passes_diff_checks = true} = Value +}, Anomalies) -> + maps:update_with(h1_passes_diff_checks_present, fun(V) -> V + 1 end, 1, + maps:update_with(h1_passes_diff_checks_present_sample, fun(V) -> V end, {Key, Value}, Anomalies)); +maybe_search_for_anomalies_cache_values_h1_passes_diff_checks_present({_, _}, Anomalies) -> + Anomalies. + + + %%%=================================================================== %%% Tests. %%%=================================================================== diff --git a/apps/arweave/src/ar_mining_server.erl b/apps/arweave/src/ar_mining_server.erl index 4b9d4a1c8..32c088c2a 100644 --- a/apps/arweave/src/ar_mining_server.erl +++ b/apps/arweave/src/ar_mining_server.erl @@ -13,11 +13,11 @@ -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). --include_lib("arweave/include/ar.hrl"). --include_lib("arweave/include/ar_consensus.hrl"). --include_lib("arweave/include/ar_config.hrl"). --include_lib("arweave/include/ar_data_discovery.hrl"). --include_lib("arweave/include/ar_mining.hrl"). +-include("ar.hrl"). +-include("ar_consensus.hrl"). +-include("ar_config.hrl"). +-include("ar_data_discovery.hrl"). +-include("ar_mining.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("stdlib/include/ms_transform.hrl"). 
@@ -671,57 +671,9 @@ prepare_solution(last_step_checkpoints, Candidate, Solution) -> _ -> LastStepCheckpoints end, - prepare_solution(steps, Candidate, Solution#mining_solution{ + prepare_solution(proofs, Candidate, Solution#mining_solution{ last_step_checkpoints = LastStepCheckpoints2 }); -prepare_solution(steps, Candidate, Solution) -> - #mining_candidate{ step_number = StepNumber } = Candidate, - [{_, TipNonceLimiterInfo}] = ets:lookup(node_state, nonce_limiter_info), - #nonce_limiter_info{ global_step_number = PrevStepNumber, seed = PrevSeed, - next_seed = PrevNextSeed, - next_vdf_difficulty = PrevNextVDFDifficulty } = TipNonceLimiterInfo, - case StepNumber > PrevStepNumber of - true -> - Steps = ar_nonce_limiter:get_steps( - PrevStepNumber, StepNumber, PrevNextSeed, PrevNextVDFDifficulty), - case Steps of - not_found -> - CurrentSessionKey = ar_nonce_limiter:session_key(TipNonceLimiterInfo), - SolutionSessionKey = Candidate#mining_candidate.session_key, - LogData = [ - {current_session_key, - ar_nonce_limiter:encode_session_key(CurrentSessionKey)}, - {solution_session_key, - ar_nonce_limiter:encode_session_key(SolutionSessionKey)}, - {start_step_number, PrevStepNumber}, - {next_step_number, StepNumber}, - {seed, ar_util:safe_encode(PrevSeed)}, - {next_seed, ar_util:safe_encode(PrevNextSeed)}, - {next_vdf_difficulty, PrevNextVDFDifficulty}, - {h1, ar_util:safe_encode(Candidate#mining_candidate.h1)}, - {h2, ar_util:safe_encode(Candidate#mining_candidate.h2)}], - ?LOG_INFO([{event, found_solution_but_failed_to_find_checkpoints} - | LogData]), - may_be_leave_it_to_exit_peer( - prepare_solution(proofs, Candidate, - Solution#mining_solution{ steps = [] }), - step_checkpoints_not_found, LogData); - _ -> - prepare_solution(proofs, Candidate, - Solution#mining_solution{ steps = Steps }) - end; - false -> - log_prepare_solution_failure(Solution, stale, stale_step_number, miner, [ - {start_step_number, PrevStepNumber}, - {next_step_number, StepNumber}, - {next_seed, ar_util:safe_encode(PrevNextSeed)}, - {next_vdf_difficulty, PrevNextVDFDifficulty}, - {h1, ar_util:safe_encode(Candidate#mining_candidate.h1)}, - {h2, ar_util:safe_encode(Candidate#mining_candidate.h2)} - ]), - error - end; - prepare_solution(proofs, Candidate, Solution) -> #mining_candidate{ h0 = H0, h1 = H1, h2 = H2, nonce = Nonce, partition_number = PartitionNumber, diff --git a/apps/arweave/src/ar_mining_stats.erl b/apps/arweave/src/ar_mining_stats.erl index 7ce0e1828..04cc10a3a 100644 --- a/apps/arweave/src/ar_mining_stats.erl +++ b/apps/arweave/src/ar_mining_stats.erl @@ -4,7 +4,7 @@ -export([start_link/0, start_performance_reports/0, pause_performance_reports/1, mining_paused/0, set_total_data_size/1, set_storage_module_data_size/6, vdf_computed/0, raw_read_rate/2, chunks_read/2, h1_computed/2, h2_computed/2, - h1_solution/0, h2_solution/0, block_found/0, + h1_solution/0, h2_solution/0, block_found/0, block_mined_but_orphaned/0, h1_sent_to_peer/2, h1_received_from_peer/2, h2_sent_to_peer/1, h2_received_from_peer/1, get_partition_data_size/2]). @@ -178,6 +178,12 @@ block_found() -> block_found(Now) -> increment_count(confirmed_block, 1, Now). +block_mined_but_orphaned() -> + block_mined_but_orphaned(erlang:monotonic_time(millisecond)). + +block_mined_but_orphaned(Now) -> + increment_count(block_mined_but_orphaned, 1, Now). 
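+%% The block_mined_but_orphaned counter is incremented from ar_watchdog when a
+%% block mined by this node fails to reach the confirmation threshold because a
+%% different block was confirmed at that height (see the ar_watchdog hunk below).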
+ set_total_data_size(DataSize) -> try prometheus_gauge:set(v2_index_data_size, DataSize), diff --git a/apps/arweave/src/ar_mining_worker.erl b/apps/arweave/src/ar_mining_worker.erl index bc1a66d27..1fcb12e9f 100644 --- a/apps/arweave/src/ar_mining_worker.erl +++ b/apps/arweave/src/ar_mining_worker.erl @@ -423,11 +423,11 @@ handle_task({computed_h1, Candidate, _ExtraArgs}, State) -> (#ar_mining_cache_value{chunk2 = undefined} = CachedValue) -> %% chunk2 hasn't been read yet, so we cache H1 and wait for it. %% If H1 passes diff checks, we will skip H2 for this nonce. - {ok, CachedValue#ar_mining_cache_value{h1 = H1, h1_passes_diff_checks = H1PassesDiffChecks}}; + {ok, CachedValue#ar_mining_cache_value{ h1 = H1, h1_passes_diff_checks = H1PassesDiffChecks }}; (#ar_mining_cache_value{chunk2 = Chunk2} = CachedValue) when not H1PassesDiffChecks -> %% chunk2 has already been read, so we can compute H2 now. ar_mining_hash:compute_h2(self(), Candidate#mining_candidate{ chunk2 = Chunk2 }), - {ok, CachedValue#ar_mining_cache_value{h1 = H1}}; + {ok, CachedValue#ar_mining_cache_value{ h1 = H1 }}; (#ar_mining_cache_value{chunk2 = _Chunk2} = _CachedValue) when H1PassesDiffChecks -> %% H1 passes diff checks, so we skip H2 for this nonce. %% Might as well drop the cached data, we don't need it anymore. @@ -481,18 +481,7 @@ handle_task({computed_h2, Candidate, _ExtraArgs}, State) -> %% In case of solo mining, the `Check` will always be `true`. %% In case of pool mining, the `Check` will be `partial` or `true`. %% In either case, we prepare and post the solution. - case Candidate#mining_candidate.chunk1 of - not_set -> - ?LOG_ERROR([{event, received_solution_candidate_without_chunk1_in_solo_mining}, - {worker, State#state.name}, - {step, Candidate#mining_candidate.step_number}, - {nonce, Candidate#mining_candidate.nonce}, - {session_key, ar_nonce_limiter:encode_session_key(Candidate#mining_candidate.session_key)}, - {h2, ar_util:encode(H2)}]), - ok; - _ -> - ar_mining_server:prepare_and_post_solution(Candidate) - end; + ar_mining_server:prepare_and_post_solution(Candidate); {Check, _} when partial == Check orelse true == Check -> %% This branch only handles the case where we're part of a coordinated mining set. %% In this case, we prepare the PoA2 and send it to the lead peer. @@ -673,7 +662,6 @@ process_chunks( %% Nonce falls in a chunk beyond the current chunk offset, (for example, because we %% read extra chunk in the beginning of recall range). Move ahead to the next %% chunk offset. - %% No need to remove anything from cache, as the nonce is still in the recall range. process_chunks( WhichChunk, Candidate, RangeStart, Nonce, NoncesPerChunk, NoncesPerRecallRange, ChunkOffsets, SubChunkSize, Count, State @@ -718,7 +706,7 @@ process_sub_chunk(chunk1, Candidate, SubChunk, State) -> ?CACHE_KEY(Candidate#mining_candidate.cache_ref, Candidate#mining_candidate.nonce), Candidate#mining_candidate.session_key, State#state.chunk_cache, - fun(CachedValue) -> {ok, CachedValue#ar_mining_cache_value{chunk1 = SubChunk}} end + fun(CachedValue) -> {ok, CachedValue#ar_mining_cache_value{ chunk1 = SubChunk }} end ) of {ok, ChunkCache2} -> State#state{ chunk_cache = ChunkCache2 }; {error, Reason} -> @@ -736,6 +724,10 @@ process_sub_chunk(chunk2, Candidate, SubChunk, State) -> Candidate#mining_candidate.session_key, State#state.chunk_cache, fun + (#ar_mining_cache_value{chunk1_missing = true}) -> + %% We've already marked the chunk1 as missing, so there was no reservation for it. 
+ %% Since there is no need to calculate H2, we can just drop the cached value. + {ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; (#ar_mining_cache_value{h1_passes_diff_checks = true} = _CachedValue) -> %% H1 passes diff checks, so we skip H2 for this nonce. %% Drop the cached data, we don't need it anymore. @@ -744,11 +736,11 @@ process_sub_chunk(chunk2, Candidate, SubChunk, State) -> {ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; (#ar_mining_cache_value{h1 = undefined} = CachedValue) -> %% H1 is not yet calculated, cache the chunk2 for this nonce. - {ok, CachedValue#ar_mining_cache_value{chunk2 = SubChunk}}; - (#ar_mining_cache_value{h1 = H1} = CachedValue) -> + {ok, CachedValue#ar_mining_cache_value{ chunk2 = SubChunk }}; + (#ar_mining_cache_value{h1 = H1, chunk1 = Chunk1} = CachedValue) -> %% H1 is already calculated, compute H2 and cache the chunk2 for this nonce. - ar_mining_hash:compute_h2(self(), Candidate2#mining_candidate{ h1 = H1 }), - {ok, CachedValue#ar_mining_cache_value{chunk2 = SubChunk}} + ar_mining_hash:compute_h2(self(), Candidate2#mining_candidate{ h1 = H1, chunk1 = Chunk1 }), + {ok, CachedValue#ar_mining_cache_value{ chunk2 = SubChunk }} end ) of {ok, ChunkCache2} -> State#state{ chunk_cache = ChunkCache2 }; @@ -939,17 +931,14 @@ mark_single_chunk1_missing_or_drop(Nonce, NoncesLeft, Candidate, State) -> %% We've already marked the chunk2 as missing, so there was no reservation for it. %% We can just drop the cached value. {ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; - (#ar_mining_cache_value{chunk2 = Chunk2}) when undefined /= Chunk2 -> + (#ar_mining_cache_value{chunk2 = Chunk2}) when is_binary(Chunk2) -> %% We've already read the chunk2 from disk, so we can just drop the cached value. %% The cache reservation for corresponding chunk2 was already consumed. {ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; - (CachedValue) -> + (#ar_mining_cache_value{chunk2 = undefined} = CachedValue) -> %% Mark the chunk1 as missing. - %% If the corresponding chunk2 will be read from disk, it will be dropped immediately. - %% If we didn't read the chunk2 from disk, we didn't reserve the cache space for it; - %% in this case the cached value will hang in the cache until the session will be dropped, - %% but it will not contain any large binaries, so it will not consume any significant memory. - {ok, CachedValue#ar_mining_cache_value{chunk1_missing = true}, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)} + %% When the corresponding chunk2 will be read from disk, it will be dropped immediately. + {ok, CachedValue#ar_mining_cache_value{ chunk1_missing = true }, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)} end ) of {ok, ChunkCache1} -> @@ -976,12 +965,12 @@ mark_single_chunk2_missing_or_drop(Nonce, NoncesLeft, Candidate, State) -> %% We've already marked the chunk1 as missing, so the reservation for it was released. %% We can just drop the cached value and release the reservation for a single subchunk. {ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; - (#ar_mining_cache_value{chunk1 = Chunk1, h1 = undefined} = CachedValue) when undefined /= Chunk1 -> + (#ar_mining_cache_value{chunk1 = Chunk1, h1 = undefined} = CachedValue) when is_binary(Chunk1) -> %% We have the corresponding chunk1, but we didn't calculate H1 yet. 
%% Mark chunk2 as missing to drop the cached value after we calculate H1. %% Drop the reservation for a single subchunk. - {ok, CachedValue#ar_mining_cache_value{chunk2_missing = true}, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; - (#ar_mining_cache_value{h1 = H1}) when undefined /= H1 -> + {ok, CachedValue#ar_mining_cache_value{ chunk2_missing = true }, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; + (#ar_mining_cache_value{h1 = H1}) when is_binary(H1) -> %% We've already calculated H1, so we can drop the cached value. %% Drop the reservation for a single subchunk. {ok, drop, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)}; @@ -989,7 +978,7 @@ mark_single_chunk2_missing_or_drop(Nonce, NoncesLeft, Candidate, State) -> %% The corresponding chunk1 is not missing but we didn't read it yet, so %% we just mark the chunk2 as missing and continue. %% Drop the reservation for a single subchunk. - {ok, CachedValue#ar_mining_cache_value{chunk2_missing = true}, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)} + {ok, CachedValue#ar_mining_cache_value{ chunk2_missing = true }, -ar_block:get_sub_chunk_size(Candidate#mining_candidate.packing_difficulty)} end ) of {ok, ChunkCache1} -> @@ -1004,7 +993,7 @@ mark_single_chunk2_missing_or_drop(Nonce, NoncesLeft, Candidate, State) -> %% @doc Mark the chunk2 as missing for the whole recall range. mark_second_recall_range_missing(Candidate, State) -> #mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate, - mark_second_recall_range_missing(0, ar_block:get_max_nonce(PackingDifficulty), Candidate, State). + mark_second_recall_range_missing(0, ar_block:get_nonces_per_recall_range(PackingDifficulty), Candidate, State). mark_second_recall_range_missing(_Nonce, 0, _Candidate, State) -> State; mark_second_recall_range_missing(Nonce, NoncesLeft, Candidate, State) -> @@ -1012,7 +1001,7 @@ mark_second_recall_range_missing(Nonce, NoncesLeft, Candidate, State) -> ?CACHE_KEY(Candidate#mining_candidate.cache_ref, Nonce), Candidate#mining_candidate.session_key, State#state.chunk_cache, - fun(CachedValue) -> {ok, CachedValue#ar_mining_cache_value{chunk2_missing = true}} end + fun(CachedValue) -> {ok, CachedValue#ar_mining_cache_value{ chunk2_missing = true }} end ) of {ok, ChunkCache1} -> mark_second_recall_range_missing(Nonce + 1, NoncesLeft - 1, Candidate, State#state{ chunk_cache = ChunkCache1 }); @@ -1033,7 +1022,7 @@ cache_h1_list(Candidate, [ {H1, Nonce} | H1List ], State) -> fun(CachedValue) -> %% Store the H1 received from peer, and set chunk2_missing to false, %% marking that we have a recall range for this H1 list. - {ok, CachedValue#ar_mining_cache_value{h1 = H1, chunk2_missing = false}} + {ok, CachedValue#ar_mining_cache_value{ h1 = H1, chunk2_missing = false }} end ) of {ok, ChunkCache1} -> @@ -1077,17 +1066,17 @@ hash_computed(WhichHash, Candidate, State) -> report_and_reset_hashes(State) -> maps:foreach( - fun(Key, Value) -> - ar_mining_stats:h1_computed(Key, Value) - end, - State#state.h1_hashes - ), + fun(Key, Value) -> + ar_mining_stats:h1_computed(Key, Value) + end, + State#state.h1_hashes + ), maps:foreach( - fun(Key, Value) -> - ar_mining_stats:h2_computed(Key, Value) - end, - State#state.h2_hashes - ), + fun(Key, Value) -> + ar_mining_stats:h2_computed(Key, Value) + end, + State#state.h2_hashes + ), State#state{ h1_hashes = #{}, h2_hashes = #{} }. 
report_chunk_cache_metrics(#state{chunk_cache = ChunkCache, partition_number = Partition} = State) -> diff --git a/apps/arweave/src/ar_node_utils.erl b/apps/arweave/src/ar_node_utils.erl index 68a4de2d7..5ff6c3ff5 100644 --- a/apps/arweave/src/ar_node_utils.erl +++ b/apps/arweave/src/ar_node_utils.erl @@ -240,7 +240,7 @@ may_be_apply_double_signing_proof(#block{ may_be_apply_double_signing_proof(B, PrevB, Accounts) -> {_Pub, _Signature1, CDiff1, PrevCDiff1, _Preimage1, _Signature2, CDiff2, PrevCDiff2, _Preimage2} = B#block.double_signing_proof, - case CDiff1 == CDiff2 orelse (CDiff1 > PrevCDiff2 andalso CDiff2 > PrevCDiff1) of + case ar_block:get_double_signing_condition(CDiff1, PrevCDiff1, CDiff2, PrevCDiff2) of false -> {error, invalid_double_signing_proof_cdiff}; true -> diff --git a/apps/arweave/src/ar_node_worker.erl b/apps/arweave/src/ar_node_worker.erl index ffc59f55f..d1645f082 100644 --- a/apps/arweave/src/ar_node_worker.erl +++ b/apps/arweave/src/ar_node_worker.erl @@ -14,13 +14,13 @@ -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). -export([set_reward_addr/1]). --include("../include/ar.hrl"). --include("../include/ar_consensus.hrl"). --include("../include/ar_config.hrl"). --include("../include/ar_pricing.hrl"). --include("../include/ar_data_sync.hrl"). --include("../include/ar_vdf.hrl"). --include("../include/ar_mining.hrl"). +-include("ar.hrl"). +-include("ar_consensus.hrl"). +-include("ar_config.hrl"). +-include("ar_pricing.hrl"). +-include("ar_data_sync.hrl"). +-include("ar_vdf.hrl"). +-include("ar_mining.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -334,9 +334,13 @@ handle_cast({found_solution, miner, _Solution, _PoACache, _PoA2Cache}, #{ automine := false, miner_2_6 := undefined } = State) -> {noreply, State}; handle_cast({found_solution, Source, Solution, PoACache, PoA2Cache}, State) -> - [{_, PrevH}] = ets:lookup(node_state, current), - PrevB = ar_block_cache:get(block_cache, PrevH), - handle_found_solution({Source, Solution, PoACache, PoA2Cache}, PrevB, State, false); + case pick_prev_block_for_solution(Solution, Source) of + not_found -> + {noreply, State}; + {PrevB, Solution2} -> + handle_found_solution( + {Source, Solution2, PoACache, PoA2Cache}, PrevB, State, false) + end; handle_cast(process_task_queue, #{ task_queue := TaskQueue } = State) -> RunTask = @@ -1097,8 +1101,7 @@ may_be_get_double_signing_proof2(Iterator, RootHash, LockedRewards, Height) -> false -> false; true -> - CDiff1 == CDiff2 - orelse (CDiff1 > PrevCDiff2 andalso CDiff2 > PrevCDiff1) + ar_block:get_double_signing_condition(CDiff1, PrevCDiff1, CDiff2, PrevCDiff2) end, case ValidCDiffs of false -> @@ -1902,6 +1905,95 @@ dump_mempool(TXs, MempoolSize) -> ?LOG_ERROR([{event, failed_to_dump_mempool}, {reason, Reason}]) end. +pick_prev_block_for_solution(Solution, Source) -> + FrontBlocks = ar_block_cache:get_validated_front(block_cache), + PrevBlocks = [ar_block_cache:get(block_cache, B#block.previous_block) + || B <- FrontBlocks], + pick_prev_block_from_front_blocks(Solution, Source, FrontBlocks, PrevBlocks). + +%% @doc Search for a base block among the validated front blocks. +pick_prev_block_from_front_blocks(Solution, Source, FrontBlocks, PrevBlocks) -> + #mining_solution{ mining_address = MiningAddress } = Solution, + case [B || B <- FrontBlocks, B#block.reward_addr == MiningAddress] of + [PrevB | _] -> + pick_prev_block_for_solution(Solution, Source, PrevB); + [] -> + pick_prev_block_from_front_blocks2(Solution, Source, FrontBlocks, PrevBlocks) + end. 
+ +pick_prev_block_from_front_blocks2(Solution, Source, [PrevB | FrontBlocks], PrevBlocks) -> + #mining_solution{ step_number = StepNumber } = Solution, + MaybeSolutionOffPrevB = may_be_place_solution_on_block(PrevB, StepNumber, Solution), + case MaybeSolutionOffPrevB of + {false, _Reason} -> + pick_prev_block_from_front_blocks2(Solution, Source, FrontBlocks, PrevBlocks); + Solution2 -> + {PrevB, Solution2} + end; +pick_prev_block_from_front_blocks2(Solution, Source, [], PrevBlocks) -> + pick_prev_block_from_prev_blocks(Solution, Source, PrevBlocks). + +%% @doc Search for a base block among the previous blocks of the +%% validated front blocks. +pick_prev_block_from_prev_blocks(Solution, Source, PrevBlocks) -> + #mining_solution{ mining_address = MiningAddress } = Solution, + case [B || B <- PrevBlocks, B#block.reward_addr == MiningAddress] of + [PrevB | _] -> + pick_prev_block_for_solution(Solution, Source, PrevB); + [] -> + pick_prev_block_from_prev_blocks2(Solution, Source, PrevBlocks) + end. + +pick_prev_block_from_prev_blocks2(Solution, Source, [PrevB | PrevBlocks]) -> + #mining_solution{ step_number = StepNumber } = Solution, + MaybeSolutionOffPrevB = may_be_place_solution_on_block(PrevB, StepNumber, Solution), + case MaybeSolutionOffPrevB of + {false, _Reason} -> + pick_prev_block_from_prev_blocks2(Solution, Source, PrevBlocks); + Solution2 -> + {PrevB, Solution2} + end; +pick_prev_block_from_prev_blocks2(Solution, Source, []) -> + #mining_solution{ step_number = StepNumber, solution_hash = SolutionH } = Solution, + ar_mining_server:log_prepare_solution_failure( + Solution, stale, no_suitable_prev_block, Source, + [{step_number, StepNumber}, {solution, ar_util:encode(SolutionH)}]), + not_found. + +pick_prev_block_for_solution(Solution, Source, PrevB) -> + #mining_solution{ + step_number = StepNumber, + solution_hash = SolutionH + } = Solution, + case may_be_place_solution_on_block(PrevB, StepNumber, Solution) of + {false, Reason} -> + ar_mining_server:log_prepare_solution_failure( + Solution, stale, Reason, Source, + [{step_number, StepNumber}, + {solution, ar_util:encode(SolutionH)}, + {prev_block, ar_util:encode(PrevB#block.indep_hash)}]), + not_found; + Solution2 -> + {PrevB, Solution2} + end. + +may_be_place_solution_on_block(PrevB, StepNumber, Solution) -> + NonceLimiterInfo = PrevB#block.nonce_limiter_info, + #nonce_limiter_info{ global_step_number = PrevStepNumber, next_seed = PrevNextSeed, + next_vdf_difficulty = PrevNextVDFDifficulty } = NonceLimiterInfo, + case StepNumber > PrevStepNumber of + true -> + case ar_nonce_limiter:get_steps( + PrevStepNumber, StepNumber, PrevNextSeed, PrevNextVDFDifficulty) of + not_found -> + {false, stale_vdf_session}; + Steps -> + Solution#mining_solution{ steps = Steps } + end; + false -> + {false, stale_step_number} + end. + handle_found_solution(Args, PrevB, State, IsRebase) -> {Source, Solution, PoACache, PoA2Cache} = Args, #mining_solution{ @@ -1933,7 +2025,6 @@ handle_found_solution(Args, PrevB, State, IsRebase) -> nonce_limiter_info = PrevNonceLimiterInfo, height = PrevHeight } = PrevB, Height = PrevHeight + 1, - Now = os:system_time(second), MaxDeviation = ar_block:get_max_timestamp_deviation(), Timestamp = @@ -2077,12 +2168,29 @@ handle_found_solution(Args, PrevB, State, IsRebase) -> {false, rebase_threshold} end end, - %% Check steps and step checkpoints. 
- HaveSteps = + + PrevCDiff = PrevB#block.cumulative_diff, + CDiff = ar_difficulty:next_cumulative_diff(PrevCDiff, Diff, Height), + NoDoubleSigning = case CorrectRebaseThreshold of {false, Reason5} -> + {false, Reason5}; + true -> + case check_no_double_signing(CDiff, PrevCDiff, MiningAddress, Height) of + false -> + {false, double_signing}; + true -> + true + end + end, + + %% Check steps and step checkpoints. + HaveSteps = + case NoDoubleSigning of + {false, Reason6} -> ?LOG_WARNING([{event, ignore_mining_solution}, - {reason, Reason5}, {solution, ar_util:encode(SolutionH)}]), + {reason, Reason6}, + {solution, ar_util:encode(SolutionH)}]), false; true -> ar_nonce_limiter:get_steps(PrevStepNumber, StepNumber, PrevNextSeed, @@ -2146,8 +2254,6 @@ handle_found_solution(Args, PrevB, State, IsRebase) -> Denomination2), ScheduledPricePerGiBMinute2 = ar_pricing:redenominate(ScheduledPricePerGiBMinute, Denomination, Denomination2), - CDiff = ar_difficulty:next_cumulative_diff(PrevB#block.cumulative_diff, Diff, - Height), UnsignedB = pack_block_with_transactions(#block{ nonce = Nonce, previous_block = PrevH, @@ -2264,6 +2370,31 @@ assert_key_type(RewardKey, Height) -> end end. +check_no_double_signing(CDiff, PrevCDiff, MiningAddress, Height) -> + Blocks = ar_block_cache:get_blocks_by_miner(block_cache, MiningAddress), + not lists:any( + fun(B) -> + case ar_block:get_double_signing_condition( + B#block.cumulative_diff, + B#block.previous_cumulative_diff, + CDiff, + PrevCDiff) of + true -> + ?LOG_WARNING([{event, avoiding_double_signing}, + {block, ar_util:encode(B#block.indep_hash)}, + {height, B#block.height}, + {new_height, Height}, + {cdiff, B#block.cumulative_diff}, + {prev_cdiff, B#block.previous_cumulative_diff}, + {new_cdiff, CDiff}, + {new_prev_cdiff, PrevCDiff}]), + true; + false -> + false + end + end, + Blocks). + update_solution_cache(H, Args, State) -> %% Maintain a cache of mining solutions for potential reuse in rebasing. %% @@ -2306,7 +2437,7 @@ may_be_report_double_signing(B, State) -> previous_solution_hash = PrevSolutionH2, reward_key = {_, Key}, signature = Signature2 } = CacheB, - case CDiff1 == CDiff2 orelse (CDiff1 > PrevCDiff2 andalso CDiff2 > PrevCDiff1) of + case ar_block:get_double_signing_condition(CDiff1, PrevCDiff1, CDiff2, PrevCDiff2) of true -> Preimage1 = << PrevSolutionH1/binary, (ar_block:generate_signed_hash(B))/binary >>, @@ -2364,3 +2495,249 @@ checker_test() -> ?assertEqual({3, #{ true => 3 }}, checker([true, true, true])), ?assertEqual({3, #{ true => 2, false => 1}}, checker([true, true, false])), ?assertEqual({3, #{ true => 1, false => 2}}, checker([true, false, false])). 
+ +pick_prev_block_from_front_blocks_test_() -> + {setup, + fun() -> + %% Mock ar_nonce_limiter:get_steps/4 to return valid steps + meck:new(ar_nonce_limiter, [non_strict]), + meck:expect(ar_nonce_limiter, get_steps, fun(_, _, _, _) -> {ok, [1, 2, 3]} end) + end, + fun(_) -> + meck:unload(ar_nonce_limiter) + end, + [ + {"Test picking block with matching reward address", + fun() -> + %% Create a mining solution with step number greater than block + Solution = #mining_solution{ + mining_address = <<"addr1">>, + step_number = 2 + }, + + %% Create front blocks with different reward addresses + FrontBlocks = [ + #block{ + reward_addr = <<"addr2">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 1} + }, + #block{ + reward_addr = <<"addr1">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 1} + } + ], + + %% Call the function + {Block, Solution2} = pick_prev_block_from_front_blocks(Solution, self(), FrontBlocks, []), + ?assertEqual({ok, [1, 2, 3]}, Solution2#mining_solution.steps), + + %% Verify it picked the block with matching reward address + ?assertEqual(<<"addr1">>, Block#block.reward_addr) + end + }, + {"Test picking block from previous blocks when front blocks have higher step number", + fun() -> + %% Create a mining solution with step number less than front blocks + Solution = #mining_solution{ + mining_address = <<"addr1">>, + step_number = 1 + }, + + %% Create front blocks with step numbers higher than solution + FrontBlocks = [ + #block{ + reward_addr = <<"addr2">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 2}, + previous_block = <<"prev1">> + }, + #block{ + reward_addr = <<"addr3">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 2}, + previous_block = <<"prev2">> + } + ], + + %% Create previous blocks with valid step numbers + PrevBlocks = [ + #block{ + reward_addr = <<"addr1">>, + height = 0, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 0} + } + ], + + %% Call the function + {Block, Solution2} = pick_prev_block_from_front_blocks(Solution, self(), FrontBlocks, PrevBlocks), + ?assertEqual({ok, [1, 2, 3]}, Solution2#mining_solution.steps), + + %% Verify it picked the previous block with matching reward address + ?assertEqual(<<"addr1">>, Block#block.reward_addr) + end + }, + {"Test picking any suitable front block when no reward address matches", + fun() -> + %% Create a mining solution with step number greater than blocks + Solution = #mining_solution{ + mining_address = <<"addr1">>, + step_number = 2 + }, + + %% Create front blocks with different reward addresses but valid step numbers + FrontBlocks = [ + #block{ + reward_addr = <<"addr2">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 1} + }, + #block{ + reward_addr = <<"addr3">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 1} + } + ], + + %% Call the function + {Block, Solution2} = pick_prev_block_from_front_blocks(Solution, self(), FrontBlocks, []), + ?assertEqual({ok, [1, 2, 3]}, Solution2#mining_solution.steps), + + %% Verify it picked one of the front blocks + ?assert(lists:member(Block, FrontBlocks)) + end + }, + {"Test picking block with matching step number", + fun() -> + %% Create a mining solution with step number greater than one block + Solution = #mining_solution{ + mining_address = <<"addr1">>, + step_number = 2 + }, + + %% Create front blocks with different step numbers + FrontBlocks = [ + #block{ + 
reward_addr = <<"addr1">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 1} + }, + #block{ + reward_addr = <<"addr1">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 2} + }, + #block{ + reward_addr = <<"addr1">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 3} + } + ], + + %% Call the function + {Block, Solution2} = pick_prev_block_from_front_blocks(Solution, self(), FrontBlocks, []), + ?assertEqual({ok, [1, 2, 3]}, Solution2#mining_solution.steps), + + %% Verify it picked one of the blocks with step number less than solution + ?assert(Block#block.nonce_limiter_info#nonce_limiter_info.global_step_number < Solution#mining_solution.step_number) + end + }, + {"Test picking suitable previous block when front blocks and some previous blocks are not suitable", + fun() -> + %% Create a mining solution with step number greater than some blocks + Solution = #mining_solution{ + mining_address = <<"addr1">>, + step_number = 2 + }, + + %% Create front blocks with step numbers higher than solution + FrontBlocks = [ + #block{ + reward_addr = <<"addr2">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 3}, + previous_block = <<"prev1">> + }, + #block{ + reward_addr = <<"addr3">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 4}, + previous_block = <<"prev2">> + } + ], + + %% Create previous blocks - some with higher step numbers, one suitable + PrevBlocks = [ + #block{ + reward_addr = <<"addr4">>, + height = 0, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 3} + }, + #block{ + reward_addr = <<"addr5">>, + height = 0, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 1} + }, + #block{ + reward_addr = <<"addr6">>, + height = 0, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 4} + } + ], + + %% Call the function + {Block, Solution2} = pick_prev_block_from_front_blocks(Solution, self(), FrontBlocks, PrevBlocks), + ?assertEqual({ok, [1, 2, 3]}, Solution2#mining_solution.steps), + + %% Verify it picked the previous block with step number 1 + ?assertEqual(1, Block#block.nonce_limiter_info#nonce_limiter_info.global_step_number), + ?assertEqual(<<"addr5">>, Block#block.reward_addr) + end + }, + {"Test returning not_found when no suitable blocks exist", + fun() -> + %% Create a mining solution with step number less than all blocks + Solution = #mining_solution{ + mining_address = <<"addr1">>, + step_number = 1 + }, + + %% Create front blocks with step numbers higher than solution + FrontBlocks = [ + #block{ + reward_addr = <<"addr2">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 2}, + previous_block = <<"prev1">> + }, + #block{ + reward_addr = <<"addr3">>, + height = 1, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 3}, + previous_block = <<"prev2">> + } + ], + + %% Create previous blocks with step numbers higher than solution + PrevBlocks = [ + #block{ + reward_addr = <<"addr4">>, + height = 0, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 2} + }, + #block{ + reward_addr = <<"addr5">>, + height = 0, + nonce_limiter_info = #nonce_limiter_info{global_step_number = 3} + } + ], + + %% Call the function + Result = pick_prev_block_from_front_blocks(Solution, self(), FrontBlocks, PrevBlocks), + + %% Verify it returned not_found + ?assertEqual(not_found, Result) + end + } + ]}. 
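The double-signing check used by may_be_apply_double_signing_proof, may_be_get_double_signing_proof2, may_be_report_double_signing and the new check_no_double_signing/4 is now centralized in ar_block:get_double_signing_condition/4: two blocks conflict when they claim the same cumulative difficulty, or when each block's cumulative difficulty exceeds the other's previous cumulative difficulty, i.e. their (PrevCDiff, CDiff] ranges overlap. A minimal sketch of the predicate on illustrative numbers (the values below are made up for clarity, not taken from the patch):

%% Overlapping ranges (5, 10] and (5, 8] -> double signing.
true = ar_block:get_double_signing_condition(10, 5, 8, 5),
%% Equal cumulative difficulties always conflict.
true = ar_block:get_double_signing_condition(10, 5, 10, 9),
%% Disjoint ranges (5, 10] and (3, 5] (one block simply builds further) -> no conflict.
false = ar_block:get_double_signing_condition(10, 5, 5, 3).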
diff --git a/apps/arweave/src/ar_pool.erl b/apps/arweave/src/ar_pool.erl index 327f39072..87734831a 100644 --- a/apps/arweave/src/ar_pool.erl +++ b/apps/arweave/src/ar_pool.erl @@ -41,11 +41,11 @@ -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). --include_lib("arweave/include/ar.hrl"). --include_lib("arweave/include/ar_config.hrl"). --include_lib("arweave/include/ar_consensus.hrl"). --include_lib("arweave/include/ar_mining.hrl"). --include_lib("arweave/include/ar_pool.hrl"). +-include("ar.hrl"). +-include("ar_config.hrl"). +-include("ar_consensus.hrl"). +-include("ar_mining.hrl"). +-include("ar_pool.hrl"). -include_lib("eunit/include/eunit.hrl"). -record(state, { diff --git a/apps/arweave/src/ar_watchdog.erl b/apps/arweave/src/ar_watchdog.erl index 8c05f13e4..b1133d44f 100644 --- a/apps/arweave/src/ar_watchdog.erl +++ b/apps/arweave/src/ar_watchdog.erl @@ -128,6 +128,10 @@ handle_cast({block_received_n_confirmations, BH, Height}, State) -> _ -> Map end; + {_BH, _Map} -> + %% The mined block was orphaned. + ar_mining_stats:block_mined_but_orphaned(), + MinedBlocks; error -> MinedBlocks end, diff --git a/apps/arweave/test/ar_data_sync_disk_pool_rotation_test.erl b/apps/arweave/test/ar_data_sync_disk_pool_rotation_test.erl new file mode 100644 index 000000000..b840d7677 --- /dev/null +++ b/apps/arweave/test/ar_data_sync_disk_pool_rotation_test.erl @@ -0,0 +1,68 @@ +-module(ar_data_sync_disk_pool_rotation_test). + +-include_lib("eunit/include/eunit.hrl"). + +-include("../include/ar.hrl"). +-include("../include/ar_consensus.hrl"). +-include("../include/ar_config.hrl"). + +-import(ar_test_node, [assert_wait_until_height/2]). + +disk_pool_rotation_test_() -> + {timeout, 120, fun test_disk_pool_rotation/0}. + +test_disk_pool_rotation() -> + ?LOG_DEBUG([{event, test_disk_pool_rotation_start}]), + Addr = ar_wallet:to_address(ar_wallet:new_keyfile()), + %% Will store the three genesis chunks. + %% The third one falls inside the "overlap" (see ar_storage_module.erl) + StorageModules = [{2 * ?DATA_CHUNK_SIZE, 0, + ar_test_node:get_default_storage_module_packing(Addr, 0)}], + Wallet = ar_test_data_sync:setup_nodes( + #{ addr => Addr, storage_modules => StorageModules }), + Chunks = [crypto:strong_rand_bytes(?DATA_CHUNK_SIZE)], + {DataRoot, DataTree} = ar_merkle:generate_tree( + ar_tx:sized_chunks_to_sized_chunk_ids( + ar_tx:chunks_to_size_tagged_chunks(Chunks) + ) + ), + {TX, Chunks} = ar_test_data_sync:tx(Wallet, {fixed_data, DataRoot, Chunks}), + ar_test_node:assert_post_tx_to_peer(main, TX), + Offset = ?DATA_CHUNK_SIZE, + DataSize = ?DATA_CHUNK_SIZE, + DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree), + Proof = #{ data_root => ar_util:encode(DataRoot), + data_path => ar_util:encode(DataPath), + chunk => ar_util:encode(hd(Chunks)), + offset => integer_to_binary(Offset), + data_size => integer_to_binary(DataSize) }, + ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, + ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), + ar_test_node:mine(main), + assert_wait_until_height(main, 1), + timer:sleep(2000), + Options = #{ format => etf, random_subset => false }, + {ok, Binary1} = ar_global_sync_record:get_serialized_sync_record(Options), + {ok, Global1} = ar_intervals:safe_from_etf(Binary1), + %% 3 genesis chunks plus the two we upload here. 
+ ?assertEqual([{1048576, 0}], ar_intervals:to_list(Global1)), + ar_test_node:mine(main), + assert_wait_until_height(main, 2), + {ok, Binary2} = ar_global_sync_record:get_serialized_sync_record(Options), + {ok, Global2} = ar_intervals:safe_from_etf(Binary2), + ?assertEqual([{1048576, 0}], ar_intervals:to_list(Global2)), + ar_test_node:mine(main), + assert_wait_until_height(main, 3), + ar_test_node:mine(main), + assert_wait_until_height(main, 4), + %% The new chunk has been confirmed but there is not storage module to take it. + ?assertEqual(3, ?SEARCH_SPACE_UPPER_BOUND_DEPTH), + true = ar_util:do_until( + fun() -> + {ok, Binary3} = ar_global_sync_record:get_serialized_sync_record(Options), + {ok, Global3} = ar_intervals:safe_from_etf(Binary3), + [{786432, 0}] == ar_intervals:to_list(Global3) + end, + 200, + 5000 + ). \ No newline at end of file diff --git a/apps/arweave/test/ar_data_sync_enqueue_intervals_test.erl b/apps/arweave/test/ar_data_sync_enqueue_intervals_test.erl new file mode 100644 index 000000000..6046c0f5a --- /dev/null +++ b/apps/arweave/test/ar_data_sync_enqueue_intervals_test.erl @@ -0,0 +1,222 @@ +-module(ar_data_sync_enqueue_intervals_test). + +-include_lib("eunit/include/eunit.hrl"). + +-include("../include/ar.hrl"). +-include("../include/ar_consensus.hrl"). +-include("../include/ar_config.hrl"). + +enqueue_intervals_test() -> + ?LOG_DEBUG([{event, enqueue_intervals_test}]), + test_enqueue_intervals([], 2, [], [], [], "Empty Intervals"), + Peer1 = {1, 2, 3, 4, 1984}, + Peer2 = {101, 102, 103, 104, 1984}, + Peer3 = {201, 202, 203, 204, 1984}, + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ])} + ], + 5, + [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], + [ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, + {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1}, + {7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer1}, + {8*?DATA_CHUNK_SIZE, 9*?DATA_CHUNK_SIZE, Peer1} + ], + "Single peer, full intervals, all chunks. Non-overlapping QIntervals."), + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ])} + ], + 2, + [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], + [ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1} + ], + "Single peer, full intervals, 2 chunks. Non-overlapping QIntervals."), + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ])}, + {Peer2, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} + ])}, + {Peer3, ar_intervals:from_list([ + {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} + ])} + ], + 2, + [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], + [ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {8*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, + {5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, + {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer2}, + {7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer3} + ], + "Multiple peers, overlapping, full intervals, 2 chunks. 
Non-overlapping QIntervals."), + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ])}, + {Peer2, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} + ])}, + {Peer3, ar_intervals:from_list([ + {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} + ])} + ], + 3, + [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], + [ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {8*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, + {5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, + {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1}, + {7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer3} + ], + "Multiple peers, overlapping, full intervals, 3 chunks. Non-overlapping QIntervals."), + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ])} + ], + 5, + [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE}], + [ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {7*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, + {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1} + ], + "Single peer, full intervals, all chunks. Overlapping QIntervals."), + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ])}, + {Peer2, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} + ])}, + {Peer3, ar_intervals:from_list([ + {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} + ])} + ], + 2, + [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE}], + [ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, + {5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, + {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer2} + ], + "Multiple peers, overlapping, full intervals, 2 chunks. Overlapping QIntervals."), + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, trunc(5.75*?DATA_CHUNK_SIZE)} + ])} + ], + 2, + [ + {20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, + {trunc(8.5*?DATA_CHUNK_SIZE), trunc(6.5*?DATA_CHUNK_SIZE)} + ], + [ + {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, trunc(3.25*?DATA_CHUNK_SIZE), Peer1} + ], + "Single peer, partial intervals, 2 chunks. 
Overlapping partial QIntervals."), + + test_enqueue_intervals( + [ + {Peer1, ar_intervals:from_list([ + {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE}, + {9*?DATA_CHUNK_SIZE, trunc(5.75*?DATA_CHUNK_SIZE)} + ])}, + {Peer2, ar_intervals:from_list([ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {7*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ])}, + {Peer3, ar_intervals:from_list([ + {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} + ])} + ], + 2, + [ + {20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, + {trunc(8.5*?DATA_CHUNK_SIZE), trunc(6.5*?DATA_CHUNK_SIZE)} + ], + [ + {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, + {8*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} + ], + [ + {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, + {3*?DATA_CHUNK_SIZE, trunc(3.25*?DATA_CHUNK_SIZE), Peer1}, + {trunc(3.25*?DATA_CHUNK_SIZE), 4*?DATA_CHUNK_SIZE, Peer2}, + {6*?DATA_CHUNK_SIZE, trunc(6.5*?DATA_CHUNK_SIZE), Peer2} + ], + "Multiple peers, overlapping, full intervals, 2 chunks. Overlapping QIntervals."). + +test_enqueue_intervals(Intervals, ChunksPerPeer, QIntervalsRanges, ExpectedQIntervalRanges, ExpectedChunks, Label) -> + QIntervals = ar_intervals:from_list(QIntervalsRanges), + Q = gb_sets:new(), + {QResult, QIntervalsResult} = ar_data_sync:enqueue_intervals(Intervals, ChunksPerPeer, {Q, QIntervals}), + ExpectedQIntervals = lists:foldl(fun({End, Start}, Acc) -> + ar_intervals:add(Acc, End, Start) + end, QIntervals, ExpectedQIntervalRanges), + ?assertEqual(ar_intervals:to_list(ExpectedQIntervals), ar_intervals:to_list(QIntervalsResult), Label), + ?assertEqual(ExpectedChunks, gb_sets:to_list(QResult), Label). \ No newline at end of file diff --git a/apps/arweave/test/ar_data_sync_mines_off_only_last_chunks_test.erl b/apps/arweave/test/ar_data_sync_mines_off_only_last_chunks_test.erl new file mode 100644 index 000000000..2825d4bf6 --- /dev/null +++ b/apps/arweave/test/ar_data_sync_mines_off_only_last_chunks_test.erl @@ -0,0 +1,76 @@ +-module(ar_data_sync_mines_off_only_last_chunks_test). + +-include_lib("eunit/include/eunit.hrl"). + +-include("ar.hrl"). +-include("ar_consensus.hrl"). +-include("ar_config.hrl"). + +-import(ar_test_node, [test_with_mocked_functions/2]). + +mines_off_only_last_chunks_test_() -> + test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}, mock_reset_frequency()], + fun test_mines_off_only_last_chunks/0). + +mock_reset_frequency() -> + {ar_nonce_limiter, get_reset_frequency, fun() -> 5 end}. + +test_mines_off_only_last_chunks() -> + ?LOG_DEBUG([{event, test_mines_off_only_last_chunks_start}]), + Wallet = ar_test_data_sync:setup_nodes(), + %% Submit only the last chunks (smaller than 256 KiB) of transactions. + %% Assert the nodes construct correct proofs of access from them. 
+ lists:foreach( + fun(Height) -> + RandomID = crypto:strong_rand_bytes(32), + Chunk = crypto:strong_rand_bytes(1023), + ChunkID = ar_tx:generate_chunk_id(Chunk), + DataSize = ?DATA_CHUNK_SIZE + 1023, + {DataRoot, DataTree} = ar_merkle:generate_tree([{RandomID, ?DATA_CHUNK_SIZE}, + {ChunkID, DataSize}]), + TX = ar_test_node:sign_tx(Wallet, #{ last_tx => ar_test_node:get_tx_anchor(main), data_size => DataSize, + data_root => DataRoot }), + ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX]), + Offset = ?DATA_CHUNK_SIZE + 1, + DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree), + Proof = #{ data_root => ar_util:encode(DataRoot), + data_path => ar_util:encode(DataPath), chunk => ar_util:encode(Chunk), + offset => integer_to_binary(Offset), + data_size => integer_to_binary(DataSize) }, + ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, + ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), + case Height - ?SEARCH_SPACE_UPPER_BOUND_DEPTH of + -1 -> + %% Make sure we waited enough to have the next block use + %% the new entropy reset source. + [{_, Info}] = ets:lookup(node_state, nonce_limiter_info), + PrevStepNumber = Info#nonce_limiter_info.global_step_number, + true = ar_util:do_until( + fun() -> + ar_nonce_limiter:get_current_step_number() + > PrevStepNumber + ar_nonce_limiter:get_reset_frequency() + end, + 100, + 60000 + ); + 0 -> + %% Wait until the new chunks fall below the new upper bound and + %% remove the original big chunks. The protocol will increase the upper + %% bound based on the nonce limiter entropy reset, but ar_data_sync waits + %% for ?SEARCH_SPACE_UPPER_BOUND_DEPTH confirmations before packing the + %% chunks. + {ok, Config} = application:get_env(arweave, config), + lists:foreach( + fun(O) -> + [ar_chunk_storage:delete(O, ar_storage_module:id(Module)) + || Module <- Config#config.storage_modules] + end, + lists:seq(?DATA_CHUNK_SIZE, ar_block:strict_data_split_threshold(), + ?DATA_CHUNK_SIZE) + ); + _ -> + ok + end + end, + lists:seq(1, 6) + ). \ No newline at end of file diff --git a/apps/arweave/test/ar_data_sync_mines_off_only_second_last_chunks_test.erl b/apps/arweave/test/ar_data_sync_mines_off_only_second_last_chunks_test.erl new file mode 100644 index 000000000..f380cd1a7 --- /dev/null +++ b/apps/arweave/test/ar_data_sync_mines_off_only_second_last_chunks_test.erl @@ -0,0 +1,63 @@ +-module(ar_data_sync_mines_off_only_second_last_chunks_test). + +-include_lib("eunit/include/eunit.hrl"). + +-include("ar.hrl"). +-include("ar_consensus.hrl"). +-include("ar_config.hrl"). + +-import(ar_test_node, [test_with_mocked_functions/2]). + +mines_off_only_second_last_chunks_test_() -> + test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}, mock_reset_frequency()], + fun test_mines_off_only_second_last_chunks/0). + +mock_reset_frequency() -> + {ar_nonce_limiter, get_reset_frequency, fun() -> 5 end}. + +test_mines_off_only_second_last_chunks() -> + ?LOG_DEBUG([{event, test_mines_off_only_second_last_chunks_start}]), + Wallet = ar_test_data_sync:setup_nodes(), + %% Submit only the second last chunks (smaller than 256 KiB) of transactions. + %% Assert the nodes construct correct proofs of access from them. 
+ lists:foreach( + fun(Height) -> + RandomID = crypto:strong_rand_bytes(32), + Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE div 2), + ChunkID = ar_tx:generate_chunk_id(Chunk), + DataSize = (?DATA_CHUNK_SIZE) div 2 + (?DATA_CHUNK_SIZE) div 2 + 3, + {DataRoot, DataTree} = ar_merkle:generate_tree([{ChunkID, ?DATA_CHUNK_SIZE div 2}, + {RandomID, DataSize}]), + TX = ar_test_node:sign_tx(Wallet, #{ last_tx => ar_test_node:get_tx_anchor(main), data_size => DataSize, + data_root => DataRoot }), + ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX]), + Offset = 0, + DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree), + Proof = #{ data_root => ar_util:encode(DataRoot), + data_path => ar_util:encode(DataPath), chunk => ar_util:encode(Chunk), + offset => integer_to_binary(Offset), + data_size => integer_to_binary(DataSize) }, + ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, + ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), + case Height - ?SEARCH_SPACE_UPPER_BOUND_DEPTH >= 0 of + true -> + %% Wait until the new chunks fall below the new upper bound and + %% remove the original big chunks. The protocol will increase the upper + %% bound based on the nonce limiter entropy reset, but ar_data_sync waits + %% for ?SEARCH_SPACE_UPPER_BOUND_DEPTH confirmations before packing the + %% chunks. + {ok, Config} = application:get_env(arweave, config), + lists:foreach( + fun(O) -> + [ar_chunk_storage:delete(O, ar_storage_module:id(Module)) + || Module <- Config#config.storage_modules] + end, + lists:seq(?DATA_CHUNK_SIZE, ar_block:strict_data_split_threshold(), + ?DATA_CHUNK_SIZE) + ); + _ -> + ok + end + end, + lists:seq(1, 6) + ). \ No newline at end of file diff --git a/apps/arweave/test/ar_data_sync_recovers_from_corruption_test.erl b/apps/arweave/test/ar_data_sync_recovers_from_corruption_test.erl new file mode 100644 index 000000000..5b2461e97 --- /dev/null +++ b/apps/arweave/test/ar_data_sync_recovers_from_corruption_test.erl @@ -0,0 +1,22 @@ +-module(ar_data_sync_recovers_from_corruption_test). + +-include_lib("eunit/include/eunit.hrl"). + +-include("../include/ar.hrl"). +-include("../include/ar_consensus.hrl"). +-include("../include/ar_config.hrl"). + +-import(ar_test_node, [assert_wait_until_height/2]). + +recovers_from_corruption_test_() -> + {timeout, 300, fun test_recovers_from_corruption/0}. + +test_recovers_from_corruption() -> + ?LOG_DEBUG([{event, test_recovers_from_corruption_start}]), + ar_test_data_sync:setup_nodes(), + StoreID = ar_storage_module:id(hd(ar_storage_module:get_all(262144 * 3))), + ?debugFmt("Corrupting ~s...", [StoreID]), + [ar_chunk_storage:write_chunk(PaddedEndOffset, << 0:(262144*8) >>, #{}, StoreID) + || PaddedEndOffset <- lists:seq(262144, 262144 * 3, 262144)], + ar_test_node:mine(), + ar_test_node:assert_wait_until_height(main, 1). \ No newline at end of file diff --git a/apps/arweave/test/ar_data_sync_syncs_after_joining_test.erl b/apps/arweave/test/ar_data_sync_syncs_after_joining_test.erl new file mode 100644 index 000000000..4517bb4ff --- /dev/null +++ b/apps/arweave/test/ar_data_sync_syncs_after_joining_test.erl @@ -0,0 +1,44 @@ +-module(ar_data_sync_syncs_after_joining_test). + +-include_lib("eunit/include/eunit.hrl"). + +-include("ar.hrl"). +-include("ar_consensus.hrl"). +-include("ar_config.hrl"). + +-import(ar_test_node, [assert_wait_until_height/2, test_with_mocked_functions/2]). 
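%% The fixture below pins ar_fork:height_2_5/0 to 0 via
%% ar_test_node:test_with_mocked_functions/3. A minimal sketch of that mocking
%% idiom, assuming meck is available and that the helper wraps something similar;
%% with_mocked_fork_height/1 is a hypothetical name, not part of this change:
with_mocked_fork_height(TestFun) ->
    ok = meck:new(ar_fork, [passthrough]),
    ok = meck:expect(ar_fork, height_2_5, fun() -> 0 end),
    try
        TestFun()
    after
        meck:unload(ar_fork)
    end.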
+ +syncs_after_joining_test_() -> + ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> 0 end}], + fun test_syncs_after_joining/0, 240). + +test_syncs_after_joining() -> + test_syncs_after_joining(original_split). + +test_syncs_after_joining(Split) -> + ?LOG_DEBUG([{event, test_syncs_after_joining}, {split, Split}]), + Wallet = ar_test_data_sync:setup_nodes(), + {TX1, Chunks1} = ar_test_data_sync:tx(Wallet, {Split, 1}, v2, ?AR(1)), + B1 = ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX1]), + Proofs1 = ar_test_data_sync:post_proofs(main, B1, TX1, Chunks1), + UpperBound = ar_node:get_partition_upper_bound(ar_node:get_block_index()), + ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, UpperBound), + ar_test_data_sync:wait_until_syncs_chunks(Proofs1), + ar_test_node:disconnect_from(peer1), + {MainTX2, MainChunks2} = ar_test_data_sync:tx(Wallet, {Split, 3}, v2, ?AR(1)), + MainB2 = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [MainTX2]), + MainProofs2 = ar_test_data_sync:post_proofs(main, MainB2, MainTX2, MainChunks2), + {MainTX3, MainChunks3} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(1)), + MainB3 = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [MainTX3]), + MainProofs3 = ar_test_data_sync:post_proofs(main, MainB3, MainTX3, MainChunks3), + {PeerTX2, PeerChunks2} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(1)), + PeerB2 = ar_test_node:post_and_mine( #{ miner => peer1, await_on => peer1 }, [PeerTX2] ), + PeerProofs2 = ar_test_data_sync:post_proofs(peer1, PeerB2, PeerTX2, PeerChunks2), + ar_test_data_sync:wait_until_syncs_chunks(peer1, PeerProofs2, infinity), + _Peer2 = ar_test_node:rejoin_on(#{ node => peer1, join_on => main }), + assert_wait_until_height(peer1, 3), + ar_test_node:connect_to_peer(peer1), + UpperBound2 = ar_node:get_partition_upper_bound(ar_node:get_block_index()), + ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs2, UpperBound2), + ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs3, UpperBound2), + ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, infinity). \ No newline at end of file diff --git a/apps/arweave/test/ar_data_sync_syncs_data_test.erl b/apps/arweave/test/ar_data_sync_syncs_data_test.erl new file mode 100644 index 000000000..3e015fc0f --- /dev/null +++ b/apps/arweave/test/ar_data_sync_syncs_data_test.erl @@ -0,0 +1,64 @@ +-module(ar_data_sync_syncs_data_test). + +-include_lib("eunit/include/eunit.hrl"). + +-include("ar.hrl"). +-include("ar_consensus.hrl"). +-include("ar_config.hrl"). + +-import(ar_test_node, [assert_wait_until_height/2]). + +syncs_data_test_() -> + {timeout, 240, fun test_syncs_data/0}. 
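%% test_syncs_data/0 below (and several other tests in this change) polls with
%% ar_util:do_until(Fun, 100, 120 * 1000) and matches the result against true.
%% A minimal sketch of that polling idiom, assuming the second and third
%% arguments are the poll interval and the overall timeout in milliseconds;
%% poll_until/3 is a hypothetical stand-in, and the real ar_util:do_until/3 may
%% differ in what it returns on timeout:
poll_until(_Fun, _IntervalMs, TimeoutMs) when TimeoutMs =< 0 ->
    {error, timeout};
poll_until(Fun, IntervalMs, TimeoutMs) ->
    case Fun() of
        true ->
            true;
        false ->
            timer:sleep(IntervalMs),
            poll_until(Fun, IntervalMs, TimeoutMs - IntervalMs)
    end.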
+ +test_syncs_data() -> + ?LOG_DEBUG([{event, test_syncs_data_start}]), + Wallet = ar_test_data_sync:setup_nodes(), + Records = ar_test_data_sync:post_random_blocks(Wallet), + RecordsWithProofs = lists:flatmap( + fun({B, TX, Chunks}) -> + ar_test_data_sync:get_records_with_proofs(B, TX, Chunks) end, Records), + lists:foreach( + fun({_, _, _, {_, Proof}}) -> + ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, + ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), + ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, + ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))) + end, + RecordsWithProofs + ), + Proofs = [Proof || {_, _, _, Proof} <- RecordsWithProofs], + ar_test_data_sync:wait_until_syncs_chunks(Proofs), + DiskPoolThreshold = ar_node:get_partition_upper_bound(ar_node:get_block_index()), + ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs, DiskPoolThreshold), + lists:foreach( + fun({B, #tx{ id = TXID }, Chunks, {_, Proof}}) -> + TXSize = byte_size(binary:list_to_bin(Chunks)), + TXOffset = ar_merkle:extract_note(ar_util:decode(maps:get(tx_path, Proof))), + AbsoluteTXOffset = B#block.weave_size - B#block.block_size + TXOffset, + ExpectedOffsetInfo = ar_serialize:jsonify(#{ + offset => integer_to_binary(AbsoluteTXOffset), + size => integer_to_binary(TXSize) }), + true = ar_util:do_until( + fun() -> + case ar_test_data_sync:get_tx_offset(peer1, TXID) of + {ok, {{<<"200">>, _}, _, ExpectedOffsetInfo, _, _}} -> + true; + _ -> + false + end + end, + 100, + 120 * 1000 + ), + ExpectedData = ar_util:encode(binary:list_to_bin(Chunks)), + ar_test_node:assert_get_tx_data(main, TXID, ExpectedData), + case AbsoluteTXOffset > DiskPoolThreshold of + true -> + ok; + false -> + ar_test_node:assert_get_tx_data(peer1, TXID, ExpectedData) + end + end, + RecordsWithProofs + ). \ No newline at end of file diff --git a/apps/arweave/test/ar_data_sync_tests.erl b/apps/arweave/test/ar_data_sync_tests.erl deleted file mode 100644 index 85a26e87d..000000000 --- a/apps/arweave/test/ar_data_sync_tests.erl +++ /dev/null @@ -1,506 +0,0 @@ --module(ar_data_sync_tests). - --include_lib("eunit/include/eunit.hrl"). - --include("../include/ar.hrl"). --include("../include/ar_consensus.hrl"). --include("../include/ar_config.hrl"). - --import(ar_test_node, [assert_wait_until_height/2, test_with_mocked_functions/2]). - -recovers_from_corruption_test_() -> - {timeout, 300, fun test_recovers_from_corruption/0}. - -test_recovers_from_corruption() -> - ?LOG_DEBUG([{event, test_recovers_from_corruption_start}]), - ar_test_data_sync:setup_nodes(), - {ok, Config} = application:get_env(arweave, config), - StoreID = ar_storage_module:id(hd(ar_storage_module:get_all(262144 * 3))), - ?debugFmt("Corrupting ~s...", [StoreID]), - [ar_chunk_storage:write_chunk(PaddedEndOffset, << 0:(262144*8) >>, #{}, StoreID) - || PaddedEndOffset <- lists:seq(262144, 262144 * 3, 262144)], - ar_test_node:mine(), - ar_test_node:assert_wait_until_height(main, 1). - -syncs_data_test_() -> - {timeout, 240, fun test_syncs_data/0}. 
- -test_syncs_data() -> - ?LOG_DEBUG([{event, test_syncs_data_start}]), - Wallet = ar_test_data_sync:setup_nodes(), - Records = ar_test_data_sync:post_random_blocks(Wallet), - RecordsWithProofs = lists:flatmap( - fun({B, TX, Chunks}) -> - ar_test_data_sync:get_records_with_proofs(B, TX, Chunks) end, Records), - lists:foreach( - fun({_, _, _, {_, Proof}}) -> - ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, - ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), - ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, - ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))) - end, - RecordsWithProofs - ), - Proofs = [Proof || {_, _, _, Proof} <- RecordsWithProofs], - ar_test_data_sync:wait_until_syncs_chunks(Proofs), - DiskPoolThreshold = ar_node:get_partition_upper_bound(ar_node:get_block_index()), - ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs, DiskPoolThreshold), - lists:foreach( - fun({B, #tx{ id = TXID }, Chunks, {_, Proof}}) -> - TXSize = byte_size(binary:list_to_bin(Chunks)), - TXOffset = ar_merkle:extract_note(ar_util:decode(maps:get(tx_path, Proof))), - AbsoluteTXOffset = B#block.weave_size - B#block.block_size + TXOffset, - ExpectedOffsetInfo = ar_serialize:jsonify(#{ - offset => integer_to_binary(AbsoluteTXOffset), - size => integer_to_binary(TXSize) }), - true = ar_util:do_until( - fun() -> - case ar_test_data_sync:get_tx_offset(peer1, TXID) of - {ok, {{<<"200">>, _}, _, ExpectedOffsetInfo, _, _}} -> - true; - _ -> - false - end - end, - 100, - 120 * 1000 - ), - ExpectedData = ar_util:encode(binary:list_to_bin(Chunks)), - ar_test_node:assert_get_tx_data(main, TXID, ExpectedData), - case AbsoluteTXOffset > DiskPoolThreshold of - true -> - ok; - false -> - ar_test_node:assert_get_tx_data(peer1, TXID, ExpectedData) - end - end, - RecordsWithProofs - ). - -syncs_after_joining_test_() -> - ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> 0 end}], - fun test_syncs_after_joining/0, 240). - -test_syncs_after_joining() -> - test_syncs_after_joining(original_split). 
- -test_syncs_after_joining(Split) -> - ?LOG_DEBUG([{event, test_syncs_after_joining}, {split, Split}]), - Wallet = ar_test_data_sync:setup_nodes(), - {TX1, Chunks1} = ar_test_data_sync:tx(Wallet, {Split, 1}, v2, ?AR(1)), - B1 = ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX1]), - Proofs1 = ar_test_data_sync:post_proofs(main, B1, TX1, Chunks1), - UpperBound = ar_node:get_partition_upper_bound(ar_node:get_block_index()), - ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, UpperBound), - ar_test_data_sync:wait_until_syncs_chunks(Proofs1), - ar_test_node:disconnect_from(peer1), - {MainTX2, MainChunks2} = ar_test_data_sync:tx(Wallet, {Split, 3}, v2, ?AR(1)), - MainB2 = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [MainTX2]), - MainProofs2 = ar_test_data_sync:post_proofs(main, MainB2, MainTX2, MainChunks2), - {MainTX3, MainChunks3} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(1)), - MainB3 = ar_test_node:post_and_mine(#{ miner => main, await_on => main }, [MainTX3]), - MainProofs3 = ar_test_data_sync:post_proofs(main, MainB3, MainTX3, MainChunks3), - {PeerTX2, PeerChunks2} = ar_test_data_sync:tx(Wallet, {Split, 2}, v2, ?AR(1)), - PeerB2 = ar_test_node:post_and_mine( #{ miner => peer1, await_on => peer1 }, [PeerTX2] ), - PeerProofs2 = ar_test_data_sync:post_proofs(peer1, PeerB2, PeerTX2, PeerChunks2), - ar_test_data_sync:wait_until_syncs_chunks(peer1, PeerProofs2, infinity), - _Peer2 = ar_test_node:rejoin_on(#{ node => peer1, join_on => main }), - assert_wait_until_height(peer1, 3), - ar_test_node:connect_to_peer(peer1), - UpperBound2 = ar_node:get_partition_upper_bound(ar_node:get_block_index()), - ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs2, UpperBound2), - ar_test_data_sync:wait_until_syncs_chunks(peer1, MainProofs3, UpperBound2), - ar_test_data_sync:wait_until_syncs_chunks(peer1, Proofs1, infinity). - -mines_off_only_last_chunks_test_() -> - test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}, mock_reset_frequency()], - fun test_mines_off_only_last_chunks/0). - -mock_reset_frequency() -> - {ar_nonce_limiter, get_reset_frequency, fun() -> 5 end}. - -test_mines_off_only_last_chunks() -> - ?LOG_DEBUG([{event, test_mines_off_only_last_chunks_start}]), - Wallet = ar_test_data_sync:setup_nodes(), - %% Submit only the last chunks (smaller than 256 KiB) of transactions. - %% Assert the nodes construct correct proofs of access from them. - lists:foreach( - fun(Height) -> - RandomID = crypto:strong_rand_bytes(32), - Chunk = crypto:strong_rand_bytes(1023), - ChunkID = ar_tx:generate_chunk_id(Chunk), - DataSize = ?DATA_CHUNK_SIZE + 1023, - {DataRoot, DataTree} = ar_merkle:generate_tree([{RandomID, ?DATA_CHUNK_SIZE}, - {ChunkID, DataSize}]), - TX = ar_test_node:sign_tx(Wallet, #{ last_tx => ar_test_node:get_tx_anchor(main), data_size => DataSize, - data_root => DataRoot }), - ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX]), - Offset = ?DATA_CHUNK_SIZE + 1, - DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree), - Proof = #{ data_root => ar_util:encode(DataRoot), - data_path => ar_util:encode(DataPath), chunk => ar_util:encode(Chunk), - offset => integer_to_binary(Offset), - data_size => integer_to_binary(DataSize) }, - ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, - ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), - case Height - ?SEARCH_SPACE_UPPER_BOUND_DEPTH of - -1 -> - %% Make sure we waited enough to have the next block use - %% the new entropy reset source. 
- [{_, Info}] = ets:lookup(node_state, nonce_limiter_info), - PrevStepNumber = Info#nonce_limiter_info.global_step_number, - true = ar_util:do_until( - fun() -> - ar_nonce_limiter:get_current_step_number() - > PrevStepNumber + ar_nonce_limiter:get_reset_frequency() - end, - 100, - 60000 - ); - 0 -> - %% Wait until the new chunks fall below the new upper bound and - %% remove the original big chunks. The protocol will increase the upper - %% bound based on the nonce limiter entropy reset, but ar_data_sync waits - %% for ?SEARCH_SPACE_UPPER_BOUND_DEPTH confirmations before packing the - %% chunks. - {ok, Config} = application:get_env(arweave, config), - lists:foreach( - fun(O) -> - [ar_chunk_storage:delete(O, ar_storage_module:id(Module)) - || Module <- Config#config.storage_modules] - end, - lists:seq(?DATA_CHUNK_SIZE, ar_block:strict_data_split_threshold(), - ?DATA_CHUNK_SIZE) - ); - _ -> - ok - end - end, - lists:seq(1, 6) - ). - -mines_off_only_second_last_chunks_test_() -> - test_with_mocked_functions([{ar_fork, height_2_6, fun() -> 0 end}, mock_reset_frequency()], - fun test_mines_off_only_second_last_chunks/0). - -test_mines_off_only_second_last_chunks() -> - ?LOG_DEBUG([{event, test_mines_off_only_second_last_chunks_start}]), - Wallet = ar_test_data_sync:setup_nodes(), - %% Submit only the second last chunks (smaller than 256 KiB) of transactions. - %% Assert the nodes construct correct proofs of access from them. - lists:foreach( - fun(Height) -> - RandomID = crypto:strong_rand_bytes(32), - Chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE div 2), - ChunkID = ar_tx:generate_chunk_id(Chunk), - DataSize = (?DATA_CHUNK_SIZE) div 2 + (?DATA_CHUNK_SIZE) div 2 + 3, - {DataRoot, DataTree} = ar_merkle:generate_tree([{ChunkID, ?DATA_CHUNK_SIZE div 2}, - {RandomID, DataSize}]), - TX = ar_test_node:sign_tx(Wallet, #{ last_tx => ar_test_node:get_tx_anchor(main), data_size => DataSize, - data_root => DataRoot }), - ar_test_node:post_and_mine(#{ miner => main, await_on => peer1 }, [TX]), - Offset = 0, - DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree), - Proof = #{ data_root => ar_util:encode(DataRoot), - data_path => ar_util:encode(DataPath), chunk => ar_util:encode(Chunk), - offset => integer_to_binary(Offset), - data_size => integer_to_binary(DataSize) }, - ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, - ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), - case Height - ?SEARCH_SPACE_UPPER_BOUND_DEPTH >= 0 of - true -> - %% Wait until the new chunks fall below the new upper bound and - %% remove the original big chunks. The protocol will increase the upper - %% bound based on the nonce limiter entropy reset, but ar_data_sync waits - %% for ?SEARCH_SPACE_UPPER_BOUND_DEPTH confirmations before packing the - %% chunks. - {ok, Config} = application:get_env(arweave, config), - lists:foreach( - fun(O) -> - [ar_chunk_storage:delete(O, ar_storage_module:id(Module)) - || Module <- Config#config.storage_modules] - end, - lists:seq(?DATA_CHUNK_SIZE, ar_block:strict_data_split_threshold(), - ?DATA_CHUNK_SIZE) - ); - _ -> - ok - end - end, - lists:seq(1, 6) - ). - -disk_pool_rotation_test_() -> - {timeout, 120, fun test_disk_pool_rotation/0}. - -test_disk_pool_rotation() -> - ?LOG_DEBUG([{event, test_disk_pool_rotation_start}]), - Addr = ar_wallet:to_address(ar_wallet:new_keyfile()), - %% Will store the three genesis chunks. 
- %% The third one falls inside the "overlap" (see ar_storage_module.erl) - StorageModules = [{2 * ?DATA_CHUNK_SIZE, 0, - ar_test_node:get_default_storage_module_packing(Addr, 0)}], - Wallet = ar_test_data_sync:setup_nodes( - #{ addr => Addr, storage_modules => StorageModules }), - Chunks = [crypto:strong_rand_bytes(?DATA_CHUNK_SIZE)], - {DataRoot, DataTree} = ar_merkle:generate_tree( - ar_tx:sized_chunks_to_sized_chunk_ids( - ar_tx:chunks_to_size_tagged_chunks(Chunks) - ) - ), - {TX, Chunks} = ar_test_data_sync:tx(Wallet, {fixed_data, DataRoot, Chunks}), - ar_test_node:assert_post_tx_to_peer(main, TX), - Offset = ?DATA_CHUNK_SIZE, - DataSize = ?DATA_CHUNK_SIZE, - DataPath = ar_merkle:generate_path(DataRoot, Offset, DataTree), - Proof = #{ data_root => ar_util:encode(DataRoot), - data_path => ar_util:encode(DataPath), - chunk => ar_util:encode(hd(Chunks)), - offset => integer_to_binary(Offset), - data_size => integer_to_binary(DataSize) }, - ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, - ar_test_node:post_chunk(main, ar_serialize:jsonify(Proof))), - ar_test_node:mine(main), - assert_wait_until_height(main, 1), - timer:sleep(2000), - Options = #{ format => etf, random_subset => false }, - {ok, Binary1} = ar_global_sync_record:get_serialized_sync_record(Options), - {ok, Global1} = ar_intervals:safe_from_etf(Binary1), - %% 3 genesis chunks plus the two we upload here. - ?assertEqual([{1048576, 0}], ar_intervals:to_list(Global1)), - ar_test_node:mine(main), - assert_wait_until_height(main, 2), - {ok, Binary2} = ar_global_sync_record:get_serialized_sync_record(Options), - {ok, Global2} = ar_intervals:safe_from_etf(Binary2), - ?assertEqual([{1048576, 0}], ar_intervals:to_list(Global2)), - ar_test_node:mine(main), - assert_wait_until_height(main, 3), - ar_test_node:mine(main), - assert_wait_until_height(main, 4), - %% The new chunk has been confirmed but there is not storage module to take it. - ?assertEqual(3, ?SEARCH_SPACE_UPPER_BOUND_DEPTH), - true = ar_util:do_until( - fun() -> - {ok, Binary3} = ar_global_sync_record:get_serialized_sync_record(Options), - {ok, Global3} = ar_intervals:safe_from_etf(Binary3), - [{786432, 0}] == ar_intervals:to_list(Global3) - end, - 200, - 5000 - ). - -enqueue_intervals_test() -> - ?LOG_DEBUG([{event, enqueue_intervals_test}]), - test_enqueue_intervals([], 2, [], [], [], "Empty Intervals"), - Peer1 = {1, 2, 3, 4, 1984}, - Peer2 = {101, 102, 103, 104, 1984}, - Peer3 = {201, 202, 203, 204, 1984}, - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ])} - ], - 5, - [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], - [ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, - {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1}, - {7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer1}, - {8*?DATA_CHUNK_SIZE, 9*?DATA_CHUNK_SIZE, Peer1} - ], - "Single peer, full intervals, all chunks. Non-overlapping QIntervals."), - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ])} - ], - 2, - [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], - [ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1} - ], - "Single peer, full intervals, 2 chunks. 
Non-overlapping QIntervals."), - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ])}, - {Peer2, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} - ])}, - {Peer3, ar_intervals:from_list([ - {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} - ])} - ], - 2, - [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], - [ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {8*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, - {5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, - {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer2}, - {7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer3} - ], - "Multiple peers, overlapping, full intervals, 2 chunks. Non-overlapping QIntervals."), - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ])}, - {Peer2, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} - ])}, - {Peer3, ar_intervals:from_list([ - {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} - ])} - ], - 3, - [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}], - [ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {8*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, - {5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, - {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1}, - {7*?DATA_CHUNK_SIZE, 8*?DATA_CHUNK_SIZE, Peer3} - ], - "Multiple peers, overlapping, full intervals, 3 chunks. Non-overlapping QIntervals."), - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ])} - ], - 5, - [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE}], - [ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {7*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, - {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer1} - ], - "Single peer, full intervals, all chunks. Overlapping QIntervals."), - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ])}, - {Peer2, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} - ])}, - {Peer3, ar_intervals:from_list([ - {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} - ])} - ], - 2, - [{20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, {9*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE}], - [ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {7*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE, Peer1}, - {5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Peer2}, - {6*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE, Peer2} - ], - "Multiple peers, overlapping, full intervals, 2 chunks. 
Overlapping QIntervals."), - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, trunc(5.75*?DATA_CHUNK_SIZE)} - ])} - ], - 2, - [ - {20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, - {trunc(8.5*?DATA_CHUNK_SIZE), trunc(6.5*?DATA_CHUNK_SIZE)} - ], - [ - {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, trunc(3.25*?DATA_CHUNK_SIZE), Peer1} - ], - "Single peer, partial intervals, 2 chunks. Overlapping partial QIntervals."), - - test_enqueue_intervals( - [ - {Peer1, ar_intervals:from_list([ - {trunc(3.25*?DATA_CHUNK_SIZE), 2*?DATA_CHUNK_SIZE}, - {9*?DATA_CHUNK_SIZE, trunc(5.75*?DATA_CHUNK_SIZE)} - ])}, - {Peer2, ar_intervals:from_list([ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {7*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ])}, - {Peer3, ar_intervals:from_list([ - {8*?DATA_CHUNK_SIZE, 7*?DATA_CHUNK_SIZE} - ])} - ], - 2, - [ - {20*?DATA_CHUNK_SIZE, 10*?DATA_CHUNK_SIZE}, - {trunc(8.5*?DATA_CHUNK_SIZE), trunc(6.5*?DATA_CHUNK_SIZE)} - ], - [ - {4*?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE}, - {8*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} - ], - [ - {2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE, Peer1}, - {3*?DATA_CHUNK_SIZE, trunc(3.25*?DATA_CHUNK_SIZE), Peer1}, - {trunc(3.25*?DATA_CHUNK_SIZE), 4*?DATA_CHUNK_SIZE, Peer2}, - {6*?DATA_CHUNK_SIZE, trunc(6.5*?DATA_CHUNK_SIZE), Peer2} - ], - "Multiple peers, overlapping, full intervals, 2 chunks. Overlapping QIntervals."). - -test_enqueue_intervals(Intervals, ChunksPerPeer, QIntervalsRanges, ExpectedQIntervalRanges, ExpectedChunks, Label) -> - QIntervals = ar_intervals:from_list(QIntervalsRanges), - Q = gb_sets:new(), - {QResult, QIntervalsResult} = ar_data_sync:enqueue_intervals(Intervals, ChunksPerPeer, {Q, QIntervals}), - ExpectedQIntervals = lists:foldl(fun({End, Start}, Acc) -> - ar_intervals:add(Acc, End, Start) - end, QIntervals, ExpectedQIntervalRanges), - ?assertEqual(ar_intervals:to_list(ExpectedQIntervals), ar_intervals:to_list(QIntervalsResult), Label), - ?assertEqual(ExpectedChunks, gb_sets:to_list(QResult), Label). - diff --git a/apps/arweave/test/ar_test_data_sync.erl b/apps/arweave/test/ar_test_data_sync.erl index 00c01295e..3a48b68fd 100644 --- a/apps/arweave/test/ar_test_data_sync.erl +++ b/apps/arweave/test/ar_test_data_sync.erl @@ -34,7 +34,7 @@ setup_nodes2(#{ peer_addr := PeerAddr } = Options) -> {B0, Options2} = case maps:get(b0, Options, not_set) of not_set -> - [Genesis] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(200000), <<>>}]), + [Genesis] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(200000), <<>>}], ar_retarget:switch_to_linear_diff(2)), {Genesis, Options#{ b0 => Genesis }}; Value -> {Value, Options} diff --git a/testnet/config/testnet-4.json b/testnet/config/testnet-4.json index dbc5c03b5..ef88236d7 100644 --- a/testnet/config/testnet-4.json +++ b/testnet/config/testnet-4.json @@ -6,6 +6,7 @@ "1226,100000000000,hDdptPiuAlrP5RxEHwZoffm7obIyvvBi40T5PPvp57w" ], "mining_addr": "hDdptPiuAlrP5RxEHwZoffm7obIyvvBi40T5PPvp57w", + "peers": ["testnet-1.arweave.xyz", "testnet-2.arweave.xyz"], "vdf_client_peers": [ "testnet-3.arweave.xyz" ], @@ -20,4 +21,4 @@ ], "data_dir": "/arweave-data", "requests_per_minute_limit": 9000 -} \ No newline at end of file +}
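For reference, the interval endpoints asserted in ar_data_sync_disk_pool_rotation_test.erl are whole 256 KiB chunks: the three genesis chunks plus the uploaded chunk span 4 * 262144 = 1048576 bytes, and once that chunk is confirmed past ?SEARCH_SPACE_UPPER_BOUND_DEPTH and rotated out of the disk pool with no storage module to take it, the global sync record is expected to shrink to 3 * 262144 = 786432 bytes. A quick shell check of that arithmetic, with 262144 bytes (256 KiB) taken as the ?DATA_CHUNK_SIZE these tests assume:

1> ChunkSize = 262144.
262144
2> 4 * ChunkSize.   %% three genesis chunks + the uploaded chunk, still in the disk pool
1048576
3> 3 * ChunkSize.   %% after rotation, only the three genesis chunks remain synced
786432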