diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 122ede81f..e5d575898 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -22,11 +22,11 @@ jobs: needs: build runs-on: self-hosted strategy: - fail-fast: true + fail-fast: false max-parallel: 12 matrix: core_test_mod: [ - ## Long-running tests. Put these first to limit the overall runtime of the + ## Long-running tests. Put these first to limit the overall runtime of the ## test suite ar_coordinated_mining_tests, ar_data_sync_tests, diff --git a/apps/arweave/include/ar_data_sync.hrl b/apps/arweave/include/ar_data_sync.hrl index a1818d842..29e867b8c 100644 --- a/apps/arweave/include/ar_data_sync.hrl +++ b/apps/arweave/include/ar_data_sync.hrl @@ -211,5 +211,7 @@ %% fragmentation. store_chunk_queue_threshold = ?STORE_CHUNK_QUEUE_FLUSH_SIZE_THRESHOLD, %% Cache mapping peers to /data_sync_record responses - all_peers_intervals = #{} + all_peers_intervals = #{}, + %% List of local peers used to check if we need to skip block verification. + local_peers = [] }). diff --git a/apps/arweave/src/ar_data_sync.erl b/apps/arweave/src/ar_data_sync.erl index 9d3a0e5c4..43854661c 100644 --- a/apps/arweave/src/ar_data_sync.erl +++ b/apps/arweave/src/ar_data_sync.erl @@ -656,7 +656,8 @@ init({"default" = StoreID, _}) -> weave_size = maps:get(weave_size, StateMap), disk_pool_cursor = first, disk_pool_threshold = DiskPoolThreshold, - store_id = StoreID + store_id = StoreID, + local_peers = Config#config.local_peers }, timer:apply_interval(?REMOVE_EXPIRED_DATA_ROOTS_FREQUENCY_MS, ?MODULE, remove_expired_disk_pool_data_roots, []), @@ -691,18 +692,20 @@ init({StoreID, RepackInPlacePacking}) -> process_flag(trap_exit, true), [ok, ok] = ar_events:subscribe([node_state, disksup]), State = init_kv(StoreID), + {ok, Config} = application:get_env(arweave, config), + State2 = State#sync_data_state{local_peers = Config#config.local_peers}, case RepackInPlacePacking of none -> gen_server:cast(self(), process_store_chunk_queue), {RangeStart, RangeEnd} = ar_storage_module:get_range(StoreID), - State2 = State#sync_data_state{ + State3 = State2#sync_data_state{ store_id = StoreID, range_start = RangeStart, range_end = RangeEnd }, - {ok, may_be_start_syncing(State2)}; + {ok, may_be_start_syncing(State3)}; _ -> - {ok, State} + {ok, State2} end. 
handle_cast({move_data_root_index, Cursor, N}, State) -> @@ -1087,6 +1090,7 @@ handle_cast({store_fetched_chunk, Peer, Byte, Proof} = Cast, State) -> Offset = SeekByte - BlockStartOffset, ValidateDataPathRuleset = ar_poa:get_data_path_validation_ruleset(BlockStartOffset, get_merkle_rebase_threshold()), + IsLocalPeer = lists:member(Peer, State#sync_data_state.local_peers), case validate_proof(TXRoot, BlockStartOffset, Offset, BlockSize, Proof, ValidateDataPathRuleset) of {need_unpacking, AbsoluteOffset, ChunkArgs, VArgs} -> @@ -1098,6 +1102,8 @@ handle_cast({store_fetched_chunk, Peer, Byte, Proof} = Cast, State) -> true -> decrement_chunk_cache_size(), {noreply, State}; + false when IsLocalPeer -> + process_valid_fetched_chunk(ChunkArgs, Args, State); false -> case ar_packing_server:is_buffer_full() of true -> @@ -1115,8 +1121,9 @@ handle_cast({store_fetched_chunk, Peer, Byte, Proof} = Cast, State) -> {AbsoluteOffset, unpacked}}), {noreply, State#sync_data_state{ packing_map = PackingMap#{ - {AbsoluteOffset, unpacked} => {unpack_fetched_chunk, - Args} } }} + {AbsoluteOffset, unpacked} => {unpack_fetched_chunk, Args} + } + }} end end; false -> @@ -1877,8 +1884,11 @@ get_tx_offset_data_in_range2(TXOffsetIndex, TXIndex, Start, End) -> get_tx_data(Start, End, Chunks) when Start >= End -> {ok, iolist_to_binary(Chunks)}; get_tx_data(Start, End, Chunks) -> - case get_chunk(Start + 1, #{ pack => true, packing => unpacked, - bucket_based_offset => false }) of + case get_chunk(Start + 1, #{ + pack => true, + packing => unpacked, + bucket_based_offset => false + }) of {ok, #{ chunk := Chunk }} -> get_tx_data(Start + byte_size(Chunk), End, [Chunks | Chunk]); {error, chunk_not_found} -> @@ -2829,7 +2839,7 @@ process_invalid_fetched_chunk(Peer, Byte, State) -> process_valid_fetched_chunk(ChunkArgs, Args, State) -> #sync_data_state{ store_id = StoreID } = State, - {Packing, UnpackedChunk, AbsoluteEndOffset, TXRoot, ChunkSize} = ChunkArgs, + {FetchedPacking, FetchedChunk, AbsoluteEndOffset, TXRoot, ChunkSize} = ChunkArgs, {AbsoluteTXStartOffset, TXSize, DataPath, TXPath, DataRoot, Chunk, _ChunkID, ChunkEndOffset, Peer, Byte} = Args, case is_chunk_proof_ratio_attractive(ChunkSize, TXSize, DataPath) of @@ -2845,11 +2855,19 @@ process_valid_fetched_chunk(ChunkArgs, Args, State) -> %% The chunk has been synced by another job already. decrement_chunk_cache_size(), {noreply, State}; + false when FetchedPacking =/= unpacked -> + %% We do not have the unpacked chunk, so repacking may be needed. + true = AbsoluteEndOffset == AbsoluteTXStartOffset + ChunkEndOffset, + pack_and_store_chunk({DataRoot, AbsoluteEndOffset, TXPath, TXRoot, + DataPath, FetchedPacking, ChunkEndOffset, ChunkSize, Chunk, + none, none, none}, State); false -> + %% Process the unpacked chunk. + %% true = AbsoluteEndOffset == AbsoluteTXStartOffset + ChunkEndOffset, pack_and_store_chunk({DataRoot, AbsoluteEndOffset, TXPath, TXRoot, - DataPath, Packing, ChunkEndOffset, ChunkSize, Chunk, - UnpackedChunk, none, none}, State) + DataPath, FetchedPacking, ChunkEndOffset, ChunkSize, Chunk, + FetchedChunk, none, none}, State) end end. 
diff --git a/apps/arweave/src/ar_http_iface_middleware.erl b/apps/arweave/src/ar_http_iface_middleware.erl index 8af797a77..41513d5e6 100644 --- a/apps/arweave/src/ar_http_iface_middleware.erl +++ b/apps/arweave/src/ar_http_iface_middleware.erl @@ -190,7 +190,7 @@ handle(<<"GET">>, [<<"recent">>], Req, _Pid) -> true -> {200, #{}, ar_serialize:jsonify(ar_info:get_recent()), Req} end; - + handle(<<"GET">>, [<<"is_tx_blacklisted">>, EncodedTXID], Req, _Pid) -> case ar_util:safe_decode(EncodedTXID) of {error, invalid} -> @@ -2008,13 +2008,16 @@ handle_get_chunk(OffsetBinary, Req, Encoding) -> {Packing, ok}; {{true, _}, _StoreID} -> {ok, Config} = application:get_env(arweave, config), - case lists:member(pack_served_chunks, Config#config.enable) of - false -> - {none, {reply, {404, #{}, <<>>, Req}}}; + IsPackServedChunks = lists:member(pack_served_chunks, Config#config.enable), + Peer = ar_http_util:arweave_peer(Req), + IsLocalPeerAddr = lists:member(Peer, Config#config.local_peers), + + case IsPackServedChunks orelse IsLocalPeerAddr of true -> - ok = ar_semaphore:acquire(get_and_pack_chunk, - infinity), - {RequestedPacking, ok} + ok = ar_semaphore:acquire(get_and_pack_chunk, infinity), + {RequestedPacking, ok}; + false -> + {none, {reply, {404, #{}, <<>>, Req}}} end end, case CheckRecords of @@ -2022,7 +2025,7 @@ handle_get_chunk(OffsetBinary, Req, Encoding) -> Reply; ok -> Args = #{ packing => ReadPacking, - bucket_based_offset => IsBucketBasedOffset }, + bucket_based_offset => IsBucketBasedOffset, pack => true }, case ar_data_sync:get_chunk(Offset, Args) of {ok, Proof} -> Proof2 = maps:remove(unpacked_chunk, diff --git a/apps/arweave/src/ar_mining_stats.erl b/apps/arweave/src/ar_mining_stats.erl index c5ab5b101..c81e4a2db 100644 --- a/apps/arweave/src/ar_mining_stats.erl +++ b/apps/arweave/src/ar_mining_stats.erl @@ -38,7 +38,7 @@ current_h1_from_peer_hps = 0.0, total_h2_to_peer = 0, total_h2_from_peer = 0, - + partitions = [], peers = [] }). @@ -277,7 +277,7 @@ get_average_samples_by_time(Key, Now) -> AvgSamples. get_average_by_time(Key, Now) -> - case ets:lookup(?MODULE, Key) of + case ets:lookup(?MODULE, Key) of [] -> {0.0, 0.0}; [{_, Start, _Samples, _Count}] when Now - Start =:= 0 -> @@ -288,7 +288,7 @@ get_average_by_time(Key, Now) -> end. get_average_by_samples(Key) -> - case ets:lookup(?MODULE, Key) of + case ets:lookup(?MODULE, Key) of [] -> 0.0; [{_, _Start, Samples, _Count}] when Samples == 0 -> @@ -299,7 +299,7 @@ get_average_by_samples(Key) -> get_count(Key) -> - case ets:lookup(?MODULE, Key) of + case ets:lookup(?MODULE, Key) of [] -> 0; [{_, _Start, _Samples, Count}] -> @@ -307,7 +307,7 @@ get_count(Key) -> end. 
get_start(Key) -> - case ets:lookup(?MODULE, Key) of + case ets:lookup(?MODULE, Key) of [] -> undefined; [{_, Start, _Samples, _Count}] -> @@ -318,15 +318,15 @@ get_packing() -> {ok, Config} = application:get_env(arweave, config), MiningAddress = Config#config.mining_addr, Pattern1 = { - partition, '_', storage_module, '_', packing, + partition, '_', storage_module, '_', packing, {spora_2_6, MiningAddress} }, Pattern2 = { partition, '_', storage_module, '_', packing, {composite, MiningAddress, '_'} }, - - Results = + + Results = ets:match_object(?MODULE, {Pattern1, '_'}) ++ ets:match_object(?MODULE, {Pattern2, '_'}), @@ -336,9 +336,9 @@ get_packing() -> case sets:to_list(PackingsSet) of [SinglePacking] -> SinglePacking; - [] -> + [] -> % No results found - undefined; + undefined; MultiplePackings -> % More than one unique packing found ?LOG_WARNING([ @@ -436,7 +436,7 @@ get_hash_hps(PoA1Multiplier, Packing, PartitionNumber, TotalCurrent, Now) -> %% @doc calculate the maximum hash rate (in MiB per second read from disk) for the given VDF %% speed at the current weave size. optimal_partition_read_mibps(_Packing, undefined, _PartitionDataSize, _TotalDataSize, _WeaveSize) -> - 0.0; + 0.0; optimal_partition_read_mibps(Packing, VDFSpeed, PartitionDataSize, TotalDataSize, WeaveSize) -> PackingDifficulty = get_packing_difficulty(Packing), RecallRangeSize = ar_block:get_recall_range_size(PackingDifficulty) / ?MiB, @@ -447,7 +447,7 @@ optimal_partition_read_mibps(Packing, VDFSpeed, PartitionDataSize, TotalDataSize %% @doc calculate the maximum hash rate (in hashes per second) for the given VDF speed %% at the current weave size. optimal_partition_hash_hps(_PoA1Multiplier, undefined, _PartitionDataSize, _TotalDataSize, _WeaveSize) -> - 0.0; + 0.0; optimal_partition_hash_hps(PoA1Multiplier, VDFSpeed, PartitionDataSize, TotalDataSize, WeaveSize) -> BasePartitionHashes = (400.0 / VDFSpeed) * min(1.0, (PartitionDataSize / ?PARTITION_SIZE)), H1Optimal = BasePartitionHashes / PoA1Multiplier, @@ -536,10 +536,10 @@ generate_partition_report( reset_count({partition, PartitionNumber, h1, current}, Now), reset_count({partition, PartitionNumber, h2, current}, Now), - Report#report{ - optimal_overall_read_mibps = + Report#report{ + optimal_overall_read_mibps = OptimalOverallRead + PartitionReport#partition_report.optimal_read_mibps, - optimal_overall_hash_hps = + optimal_overall_hash_hps = OptimalOverallHash + PartitionReport#partition_report.optimal_hash_hps, average_read_mibps = AverageRead + PartitionReport#partition_report.average_read_mibps, current_read_mibps = CurrentRead + PartitionReport#partition_report.current_read_mibps, @@ -697,11 +697,11 @@ format_report(Report, WeaveSize) -> ), PartitionTable = format_partition_report(Report, WeaveSize), PeerTable = format_peer_report(Report), - + io_lib:format("\n~s~s~s", [Preamble, PartitionTable, PeerTable]). 
format_partition_report(Report, WeaveSize) -> - Header = + Header = "Local mining stats:\n" "+-----------+-----------+----------+---------------+---------------+---------------+------------+------------+--------------+\n" "| Partition | Data Size | % of Max | Read (Cur) | Read (Avg) | Read (Ideal) | Hash (Cur) | Hash (Avg) | Hash (Ideal) |\n" @@ -762,7 +762,7 @@ format_partition_row(PartitionReport) -> format_peer_report(#report{ peers = [] }) -> ""; format_peer_report(Report) -> - Header = + Header = "\n" "Coordinated mining cluster stats:\n" "+----------------------+--------------+--------------+-------------+-------------+--------+--------+\n" @@ -809,8 +809,8 @@ format_peer_row(PeerReport) -> "| ~20s | ~8B h/s | ~8B h/s | ~7B h/s | ~7B h/s | ~6B | ~6B |\n", [ ar_util:format_peer(Peer), - floor(CurrentH1To), floor(AverageH1To), - floor(CurrentH1From), floor(AverageH1From), + floor(CurrentH1To), floor(AverageH1To), + floor(CurrentH1From), floor(AverageH1From), TotalH2To, TotalH2From ]). @@ -862,12 +862,12 @@ test_local_stats(Fun, Stat) -> timer:sleep(1000), Fun(1, 1), Fun(1, 1), - + Fun(2, 1), TotalStart2 = get_start({partition, 2, Stat, total}), CurrentStart2 = get_start({partition, 2, Stat, current}), Fun(2, 1), - + ?assert(TotalStart1 /= TotalStart2), ?assert(CurrentStart1 /= CurrentStart2), ?assertEqual(0.0, get_average_count_by_time({partition, 1, Stat, total}, TotalStart1)), @@ -1055,7 +1055,7 @@ do_test_data_size_stats(Mining, Packing) -> ar_mining_stats:set_storage_module_data_size( ar_storage_module:id({?PARTITION_SIZE, 2, Packing}), Packing, 2, ?PARTITION_SIZE, 2, 53), - + ?assertEqual(Mining, get_packing()), ?assertEqual(336, get_partition_data_size(1, Mining)), ?assertEqual(52, get_partition_data_size(2, Mining)), @@ -1077,9 +1077,9 @@ test_peer_stats(Fun, Stat) -> ar_mining_stats:pause_performance_reports(120000), reset_all_stats(), - Peer1 = ar_test_node:peer_ip(peer1), - Peer2 = ar_test_node:peer_ip(peer2), - Peer3 = ar_test_node:peer_ip(peer3), + Peer1 = ar_test_node:peer_addr(peer1), + Peer2 = ar_test_node:peer_addr(peer2), + Peer3 = ar_test_node:peer_addr(peer3), Fun(Peer1, 10), TotalStart1 = get_start({peer, Peer1, Stat, total}), @@ -1087,12 +1087,12 @@ test_peer_stats(Fun, Stat) -> timer:sleep(1000), Fun(Peer1, 5), Fun(Peer1, 15), - + Fun(Peer2, 1), TotalStart2 = get_start({peer, Peer2, Stat, total}), CurrentStart2 = get_start({peer, Peer2, Stat, current}), Fun(Peer2, 19), - + ?assert(TotalStart1 /= TotalStart2), ?assert(CurrentStart1 /= CurrentStart2), ?assertEqual(0.0, get_average_count_by_time({peer, Peer1, Stat, total}, TotalStart1)), @@ -1174,9 +1174,9 @@ test_h2_peer_stats() -> ar_mining_stats:pause_performance_reports(120000), reset_all_stats(), - Peer1 = ar_test_node:peer_ip(peer1), - Peer2 = ar_test_node:peer_ip(peer2), - Peer3 = ar_test_node:peer_ip(peer3), + Peer1 = ar_test_node:peer_addr(peer1), + Peer2 = ar_test_node:peer_addr(peer2), + Peer3 = ar_test_node:peer_addr(peer3), ar_mining_stats:h2_sent_to_peer(Peer1), ar_mining_stats:h2_sent_to_peer(Peer1), @@ -1247,23 +1247,23 @@ test_optimal_stats(Packing, PoA1Multiplier) -> 2 -> 0.0625 end, - ?assertEqual(0.0, + ?assertEqual(0.0, optimal_partition_read_mibps( Packing, undefined, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(RecallRangeSize * 2, + ?assertEqual(RecallRangeSize * 2, optimal_partition_read_mibps( Packing, 1.0, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(RecallRangeSize, + ?assertEqual(RecallRangeSize, 
optimal_partition_read_mibps( Packing, 2.0, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(RecallRangeSize / 2, + ?assertEqual(RecallRangeSize / 2, optimal_partition_read_mibps( Packing, 1.0, floor(0.25 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(RecallRangeSize * 1.6, + ?assertEqual(RecallRangeSize * 1.6, optimal_partition_read_mibps( Packing, 1.0, ?PARTITION_SIZE, floor(6 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), @@ -1273,23 +1273,23 @@ test_optimal_stats(Packing, PoA1Multiplier) -> 2 -> {600.0, 300.0, 150.0, 440.0} end, - ?assertEqual(0.0, + ?assertEqual(0.0, optimal_partition_hash_hps( PoA1Multiplier, undefined, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(FullWeave, + ?assertEqual(FullWeave, optimal_partition_hash_hps( PoA1Multiplier, 1.0, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(SlowVDF, + ?assertEqual(SlowVDF, optimal_partition_hash_hps( PoA1Multiplier, 2.0, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(SmallPartition, + ?assertEqual(SmallPartition, optimal_partition_hash_hps( PoA1Multiplier, 1.0, floor(0.25 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(SmallWeave, + ?assertEqual(SmallWeave, optimal_partition_hash_hps( PoA1Multiplier, 1.0, ?PARTITION_SIZE, floor(6 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))). @@ -1345,8 +1345,8 @@ test_report(Mining, Packing, PoA1Multiplier) -> {?PARTITION_SIZE, 2, Mining}, {?PARTITION_SIZE, 2, Packing} ], - - try + + try application:set_env(arweave, config, Config#config{ storage_modules = StorageModules, @@ -1359,9 +1359,9 @@ test_report(Mining, Packing, PoA1Multiplier) -> {2, MiningAddress, 0}, {3, MiningAddress, 0} ], - Peer1 = ar_test_node:peer_ip(peer1), - Peer2 = ar_test_node:peer_ip(peer2), - Peer3 = ar_test_node:peer_ip(peer3), + Peer1 = ar_test_node:peer_addr(peer1), + Peer2 = ar_test_node:peer_addr(peer2), + Peer3 = ar_test_node:peer_addr(peer3), Peers = [Peer1, Peer2, Peer3], Now = erlang:monotonic_time(millisecond), @@ -1379,7 +1379,7 @@ test_report(Mining, Packing, PoA1Multiplier) -> ar_mining_stats:set_storage_module_data_size( ar_storage_module:id({floor(0.2 * ?PARTITION_SIZE), 8, Mining}), Mining, 1, floor(0.2 * ?PARTITION_SIZE), 8, - floor(0.05 * ?PARTITION_SIZE)), + floor(0.05 * ?PARTITION_SIZE)), ar_mining_stats:set_storage_module_data_size( ar_storage_module:id({?PARTITION_SIZE, 2, Mining}), Mining, 2, ?PARTITION_SIZE, 2, floor(0.25 * ?PARTITION_SIZE)), @@ -1417,7 +1417,7 @@ test_report(Mining, Packing, PoA1Multiplier) -> ar_mining_stats:h2_received_from_peer(Peer2), ar_mining_stats:h2_received_from_peer(Peer2), ar_mining_stats:h2_received_from_peer(Peer2), - + Report1 = generate_report(0, [], [], WeaveSize, Now+1000), ?assertEqual(#report{ now = Now+1000 }, Report1), log_report(format_report(Report1, WeaveSize)), @@ -1434,13 +1434,13 @@ test_report(Mining, Packing, PoA1Multiplier) -> 2 -> {5.5, 3.5, 2.0, 403.19957427982445, 235.19959144596214, 167.9999828338623} end, - ?assertEqual(#report{ + ?assertEqual(#report{ now = Now+1000, vdf_speed = 1.0 / 3.0, h1_solution = 1, h2_solution = 2, confirmed_block = 1, - total_data_size = + total_data_size = floor(0.1 * ?PARTITION_SIZE) + floor(0.2 * ?PARTITION_SIZE) + floor(0.05 * ?PARTITION_SIZE) + floor(0.25 * ?PARTITION_SIZE), optimal_overall_read_mibps = 0.9539990386963382 * 2 * 
RecallRangeSize, diff --git a/apps/arweave/src/ar_packing_server.erl b/apps/arweave/src/ar_packing_server.erl index 8c849a3f8..d17d102fd 100644 --- a/apps/arweave/src/ar_packing_server.erl +++ b/apps/arweave/src/ar_packing_server.erl @@ -186,7 +186,7 @@ get_randomx_state_by_difficulty(PackingDifficulty, PackingState) -> init([]) -> {ok, Config} = application:get_env(arweave, config), - + ar:console("~nInitialising RandomX dataset for fast packing. Key: ~p. " "The process may take several minutes.~n", [ar_util:encode(?RANDOMX_PACKING_KEY)]), {RandomXState512, _RandomXState4096} = PackingState = init_packing_state(), @@ -219,7 +219,7 @@ init([]) -> end, {ConfiguredRate, SchedulersRequired2} end, - + record_packing_benchmarks(TheoreticalMaxRate, PackingRate, Schedulers, ActualRatePack2_6, ActualRatePackComposite), SpawnSchedulers = min(SchedulersRequired, Schedulers), diff --git a/apps/arweave/src/ar_serialize.erl b/apps/arweave/src/ar_serialize.erl index 75e9630f6..272add24a 100644 --- a/apps/arweave/src/ar_serialize.erl +++ b/apps/arweave/src/ar_serialize.erl @@ -382,7 +382,7 @@ binary_to_block_time_history(_Rest, _BlockTimeHistory) -> %% Note: the #nonce_limiter_update and #vdf_session records are only serialized for communication %% between a VDF server and VDF client. Only fields that are required for this communication are %% serialized. -%% +%% %% For example, the vdf_difficulty and next_vdf_difficulty fields are omitted as they are only used %% by nodes that compute their own VDF and never need to be shared from VDF server to VDF client. nonce_limiter_update_to_binary(2 = _Format, #nonce_limiter_update{ @@ -611,7 +611,7 @@ binary_to_nonce_limiter_update_response(_Bin) -> {error, invalid2}. binary_to_nonce_limiter_update_response( - SessionFoundBin, StepNumberSize, StepNumber, Postpone, Format) + SessionFoundBin, StepNumberSize, StepNumber, Postpone, Format) when SessionFoundBin == 0; SessionFoundBin == 1 -> SessionFound = case SessionFoundBin of 0 -> false; 1 -> true end, StepNumber2 = case StepNumberSize of 0 -> undefined; _ -> StepNumber end, @@ -1545,7 +1545,7 @@ json_struct_to_tx(TXStruct, ComputeDataSize) -> }. json_list_to_diff_pair(List) -> - [PoA1DiffBin, DiffBin] = + [PoA1DiffBin, DiffBin] = case List of undefined -> [<<"0">>, <<"0">>]; _ -> List @@ -1553,7 +1553,7 @@ json_list_to_diff_pair(List) -> PoA1Diff = ar_util:binary_to_integer(PoA1DiffBin), Diff = ar_util:binary_to_integer(DiffBin), {PoA1Diff, Diff}. - + parse_data_size(1, _TXStruct, Data, true) -> byte_size(Data); parse_data_size(_Format, TXStruct, _Data, _ComputeDataSize) -> @@ -2049,7 +2049,7 @@ jobs_to_json_struct(Jobs) -> #jobs{ jobs = JobList, partial_diff = PartialDiff, seed = Seed, next_seed = NextSeed, interval_number = IntervalNumber, next_vdf_difficulty = NextVDFDiff } = Jobs, - + {[{jobs, [job_to_json_struct(Job) || Job <- JobList]}, {partial_diff, diff_pair_to_json_list(PartialDiff)}, {seed, ar_util:encode(Seed)}, diff --git a/apps/arweave/test/ar_coordinated_mining_tests.erl b/apps/arweave/test/ar_coordinated_mining_tests.erl index 9d79c9c08..a428f1f3c 100644 --- a/apps/arweave/test/ar_coordinated_mining_tests.erl +++ b/apps/arweave/test/ar_coordinated_mining_tests.erl @@ -53,7 +53,7 @@ test_single_node_one_chunk() -> {ok, B} = http_get_block(element(1, hd(BI)), ValidatorNode), ?assert(byte_size((B#block.poa)#poa.data_path) > 0), assert_empty_cache(Node). - + %% @doc One-node coordinated mining cluster mining a block with two chunks. 
test_single_node_two_chunk() -> [Node, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(1), @@ -77,7 +77,7 @@ test_two_node_retarget() -> %% @doc Three-node coordinated mining cluster mining until all nodes have contributed %% to a solution. This test does not force cross-node solutions. test_three_node() -> - [Node1, Node2, Node3, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(3), + [Node1, Node2, Node3, _ExitNode, ValidatorNode] = ar_test_node:start_coordinated(3), wait_for_each_node([Node1, Node2, Node3], ValidatorNode, 0, [0, 2, 4]), assert_empty_cache(Node1), assert_empty_cache(Node2), @@ -115,54 +115,54 @@ test_no_exit_node() -> test_no_secret() -> [Node, _ExitNode, _ValidatorNode] = ar_test_node:start_coordinated(1), - Peer = ar_test_node:peer_ip(Node), + Peer = ar_test_node:peer_addr(Node), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:get_cm_partition_table(Peer)), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:cm_h1_send(Peer, dummy_candidate())), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:cm_h2_send(Peer, dummy_candidate())), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:cm_publish_send(Peer, dummy_solution())). test_bad_secret() -> [Node, _ExitNode, _ValidatorNode] = ar_test_node:start_coordinated(1), - Peer = ar_test_node:peer_ip(Node), + Peer = ar_test_node:peer_addr(Node), {ok, Config} = application:get_env(arweave, config), ok = application:set_env(arweave, config, Config#config{ cm_api_secret = <<"this_is_not_the_actual_secret">> }), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:get_cm_partition_table(Peer)), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:cm_h1_send(Peer, dummy_candidate())), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:cm_h2_send(Peer, dummy_candidate())), ?assertMatch( - {error, {ok, {{<<"421">>, _}, _, + {error, {ok, {{<<"421">>, _}, _, <<"CM API disabled or invalid CM API secret in request.">>, _, _}}}, ar_http_iface_client:cm_publish_send(Peer, dummy_solution())). 
test_partition_table() -> [B0] = ar_weave:init([], ar_test_node:get_difficulty_for_invalid_hash(), 5 * ?PARTITION_SIZE), Config = ar_test_node:base_cm_config([]), - + MiningAddr = Config#config.mining_addr, RandomAddress = crypto:strong_rand_bytes(32), - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), %% No partitions ar_test_node:start_node(B0, Config, false), @@ -173,7 +173,7 @@ test_partition_table() -> ), %% Partition jumble with 2 addresses - ar_test_node:start_node(B0, Config#config{ + ar_test_node:start_node(B0, Config#config{ storage_modules = [ {?PARTITION_SIZE, 0, {spora_2_6, MiningAddr}}, {?PARTITION_SIZE, 0, {spora_2_6, RandomAddress}}, @@ -206,7 +206,7 @@ test_partition_table() -> %% Simulate mining start PartitionUpperBound = 35 * ?PARTITION_SIZE, %% less than the highest configured partition ar_mining_io:set_largest_seen_upper_bound(PartitionUpperBound), - + ?assertEqual( {ok, [ {0, ?PARTITION_SIZE, MiningAddr, 0}, @@ -225,14 +225,14 @@ test_peers_by_partition() -> [B0] = ar_weave:init([], ar_test_node:get_difficulty_for_invalid_hash(), PartitionUpperBound), - Peer1 = ar_test_node:peer_ip(peer1), - Peer2 = ar_test_node:peer_ip(peer2), - Peer3 = ar_test_node:peer_ip(peer3), + Peer1 = ar_test_node:peer_addr(peer1), + Peer2 = ar_test_node:peer_addr(peer2), + Peer3 = ar_test_node:peer_addr(peer3), BaseConfig = ar_test_node:base_cm_config([]), Config = BaseConfig#config{ cm_exit_peer = Peer1 }, MiningAddr = Config#config.mining_addr, - + ar_test_node:remote_call(peer1, ar_test_node, start_node, [B0, Config#config{ cm_exit_peer = not_set, cm_peers = [Peer2, Peer3], @@ -336,7 +336,7 @@ test_peers_by_partition() -> assert_peers([Peer2], peer3, 3), assert_peers([Peer1], peer3, 4), assert_peers([Peer1], peer3, 5), - ok. + ok. %% -------------------------------------------------------------------- %% Helpers @@ -383,8 +383,8 @@ wait_for_cross_node(_Miners, _ValidatorNode, _CurrentHeight, ExpectedPartitions, wait_for_cross_node(Miners, ValidatorNode, CurrentHeight, ExpectedPartitions, RetryCount) -> A = mine_in_parallel(Miners, ValidatorNode, CurrentHeight), Partitions = sets:from_list(A), - MinedCrossNodeBlock = - sets:is_subset(Partitions, ExpectedPartitions) andalso + MinedCrossNodeBlock = + sets:is_subset(Partitions, ExpectedPartitions) andalso sets:is_subset(ExpectedPartitions, Partitions), case MinedCrossNodeBlock of true -> @@ -393,7 +393,7 @@ wait_for_cross_node(Miners, ValidatorNode, CurrentHeight, ExpectedPartitions, Re wait_for_cross_node( Miners, ValidatorNode, CurrentHeight+1, ExpectedPartitions, RetryCount-1) end. - + mine_in_parallel(Miners, ValidatorNode, CurrentHeight) -> ar_util:pmap(fun(Node) -> ar_test_node:mine(Node) end, Miners), [{Hash, _, _} | _] = ar_test_node:wait_until_height(ValidatorNode, CurrentHeight + 1), @@ -409,13 +409,13 @@ mine_in_parallel(Miners, ValidatorNode, CurrentHeight) -> {ok, Block} = ar_test_node:http_get_block(Hash, ValidatorNode), case Block#block.recall_byte2 of - undefined -> + undefined -> [ ar_node:get_partition_number(Block#block.recall_byte) ]; RecallByte2 -> [ - ar_node:get_partition_number(Block#block.recall_byte), + ar_node:get_partition_number(Block#block.recall_byte), ar_node:get_partition_number(RecallByte2) ] end. 
diff --git a/apps/arweave/test/ar_fork_recovery_tests.erl b/apps/arweave/test/ar_fork_recovery_tests.erl index dc2949125..b98d5f26b 100644 --- a/apps/arweave/test/ar_fork_recovery_tests.erl +++ b/apps/arweave/test/ar_fork_recovery_tests.erl @@ -146,7 +146,7 @@ test_invalid_block_with_high_cumulative_difficulty() -> ?debugFmt("Fake block: ~s.", [ar_util:encode(B2H)]), ok = ar_events:subscribe(block), ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, - ar_http_iface_client:send_block_binary(ar_test_node:peer_ip(main), B2#block.indep_hash, + ar_http_iface_client:send_block_binary(ar_test_node:peer_addr(main), B2#block.indep_hash, ar_serialize:block_to_binary(B2))), receive {event, block, {rejected, invalid_cumulative_difficulty, B2H, _Peer2}} -> @@ -167,7 +167,7 @@ test_invalid_block_with_high_cumulative_difficulty() -> ?assertNotEqual(B2#block.indep_hash, H3), {_Peer, B3, _Time, _Size} = ar_http_iface_client:get_block_shadow(1, - ar_test_node:peer_ip(peer1), + ar_test_node:peer_addr(peer1), binary, #{}), ?assertEqual(H2, B3#block.indep_hash). @@ -224,10 +224,10 @@ fake_block_with_strong_cumulative_difficulty(B, PrevB, CDiff) -> fork_recovery_test_() -> {timeout, 300, fun test_fork_recovery/0}. - + test_fork_recovery() -> test_fork_recovery(original_split). - + test_fork_recovery(Split) -> Wallet = ar_test_data_sync:setup_nodes(), {TX1, Chunks1} = ar_test_data_sync:tx(Wallet, {Split, 13}, v2, ?AR(10)), @@ -294,4 +294,3 @@ fake_block_with_strong_cumulative_difficulty(B, PrevB, CDiff) -> UpperBound3 = ar_node:get_partition_upper_bound(ar_node:get_block_index()), ar_test_data_sync:wait_until_syncs_chunks(Proofs4, UpperBound3), ar_test_data_sync:post_proofs(peer1, PeerB2, PeerTX2, PeerChunks2). - diff --git a/apps/arweave/test/ar_http_iface_tests.erl b/apps/arweave/test/ar_http_iface_tests.erl index 35a356f29..4f9138c6c 100644 --- a/apps/arweave/test/ar_http_iface_tests.erl +++ b/apps/arweave/test/ar_http_iface_tests.erl @@ -69,7 +69,7 @@ node_blacklisting_post_spammer_test_() -> %% @doc Check that we can qickly get the local time from the peer. get_time_test() -> Now = os:system_time(second), - {ok, {Min, Max}} = ar_http_iface_client:get_time(ar_test_node:peer_ip(main), 10 * 1000), + {ok, {Min, Max}} = ar_http_iface_client:get_time(ar_test_node:peer_addr(main), 10 * 1000), ?assert(Min < Now), ?assert(Now < Max). @@ -186,7 +186,7 @@ test_addresses_with_checksum({_, Wallet1, {_, Pub2}, _}) -> ?assertEqual(ar_util:encode(TX2#tx.target), ServeTXTarget). get_balance(EncodedAddr) -> - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {_, _, _, _, Port} = Peer, {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ @@ -198,7 +198,7 @@ get_balance(EncodedAddr) -> binary_to_integer(Reply). get_last_tx(EncodedAddr) -> - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {_, _, _, _, Port} = Peer, {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ @@ -210,7 +210,7 @@ get_last_tx(EncodedAddr) -> Reply. get_price(EncodedAddr) -> - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {_, _, _, _, Port} = Peer, {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ @@ -222,7 +222,7 @@ get_price(EncodedAddr) -> binary_to_integer(Reply). get_tx(ID) -> - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {_, _, _, _, Port} = Peer, {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ @@ -236,23 +236,23 @@ get_tx(ID) -> %% @doc Ensure that server info can be retreived via the HTTP interface. 
test_get_info(_) -> ?assertEqual(info_unavailable, - ar_http_iface_client:get_info(ar_test_node:peer_ip(main), bad_key)), + ar_http_iface_client:get_info(ar_test_node:peer_addr(main), bad_key)), ?assertEqual(<>, - ar_http_iface_client:get_info(ar_test_node:peer_ip(main), network)), + ar_http_iface_client:get_info(ar_test_node:peer_addr(main), network)), ?assertEqual(?RELEASE_NUMBER, - ar_http_iface_client:get_info(ar_test_node:peer_ip(main), release)), + ar_http_iface_client:get_info(ar_test_node:peer_addr(main), release)), ?assertEqual( ?CLIENT_VERSION, - ar_http_iface_client:get_info(ar_test_node:peer_ip(main), version)), - ?assertEqual(1, ar_http_iface_client:get_info(ar_test_node:peer_ip(main), peers)), + ar_http_iface_client:get_info(ar_test_node:peer_addr(main), version)), + ?assertEqual(1, ar_http_iface_client:get_info(ar_test_node:peer_addr(main), peers)), ar_util:do_until( fun() -> - 1 == ar_http_iface_client:get_info(ar_test_node:peer_ip(main), blocks) + 1 == ar_http_iface_client:get_info(ar_test_node:peer_addr(main), blocks) end, 100, 2000 ), - ?assertEqual(1, ar_http_iface_client:get_info(ar_test_node:peer_ip(main), height)). + ?assertEqual(1, ar_http_iface_client:get_info(ar_test_node:peer_addr(main), height)). %% @doc Ensure that transactions are only accepted once. test_single_regossip(_) -> @@ -260,22 +260,22 @@ test_single_regossip(_) -> TX = ar_tx:new(), ?assertMatch( {ok, {{<<"200">>, _}, _, _, _, _}}, - ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), TX#tx.id, + ar_http_iface_client:send_tx_json(ar_test_node:peer_addr(main), TX#tx.id, ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX))) ), ?assertMatch( {ok, {{<<"200">>, _}, _, _, _, _}}, - ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_binary, [ar_test_node:peer_ip(peer1), TX#tx.id, + ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_binary, [ar_test_node:peer_addr(peer1), TX#tx.id, ar_serialize:tx_to_binary(TX)]) ), ?assertMatch( {ok, {{<<"208">>, _}, _, _, _, _}}, - ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_binary, [ar_test_node:peer_ip(peer1), TX#tx.id, + ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_binary, [ar_test_node:peer_addr(peer1), TX#tx.id, ar_serialize:tx_to_binary(TX)]) ), ?assertMatch( {ok, {{<<"208">>, _}, _, _, _, _}}, - ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_json, [ar_test_node:peer_ip(peer1), TX#tx.id, + ar_test_node:remote_call(peer1, ar_http_iface_client, send_tx_json, [ar_test_node:peer_addr(peer1), TX#tx.id, ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX))]) ). @@ -300,13 +300,13 @@ test_node_blacklisting_post_spammer() -> -spec get_fun_msg_pair(atom()) -> {fun(), any()}. 
get_fun_msg_pair(get_info) -> { fun(_) -> - ar_http_iface_client:get_info(ar_test_node:peer_ip(main)) + ar_http_iface_client:get_info(ar_test_node:peer_addr(main)) end , info_unavailable}; get_fun_msg_pair(send_tx_binary) -> { fun(_) -> InvalidTX = (ar_tx:new())#tx{ owner = <<"key">>, signature = <<"invalid">> }, - case ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), + case ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), InvalidTX#tx.id, ar_serialize:tx_to_binary(InvalidTX)) of {ok, {{<<"429">>, <<"Too Many Requests">>}, _, @@ -372,7 +372,7 @@ test_get_balance({B0, _, _, {_, Pub1}}) -> {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet/" ++ Addr ++ "/balance" }), ?assertEqual(?AR(10), binary_to_integer(Body)), @@ -380,7 +380,7 @@ test_get_balance({B0, _, _, {_, Pub1}}) -> {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet_list/" ++ RootHash ++ "/" ++ Addr ++ "/balance" }), ar_test_node:mine(), @@ -388,7 +388,7 @@ test_get_balance({B0, _, _, {_, Pub1}}) -> {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet_list/" ++ RootHash ++ "/" ++ Addr ++ "/balance" }). @@ -400,7 +400,7 @@ test_get_wallet_list_in_chunks({B0, {_, Pub1}, {_, Pub2}, {_, StaticPub}}) -> {ok, {{<<"404">>, _}, _, <<"Root hash not found.">>, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet_list/" ++ NonExistentRootHash }), @@ -417,7 +417,7 @@ test_get_wallet_list_in_chunks({B0, {_, Pub1}, {_, Pub2}, {_, StaticPub}}) -> {ok, {{<<"200">>, _}, _, Body1, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet_list/" ++ RootHash }), Cursor = maps:get(next_cursor, binary_to_term(Body1)), @@ -429,7 +429,7 @@ test_get_wallet_list_in_chunks({B0, {_, Pub1}, {_, Pub2}, {_, StaticPub}}) -> {ok, {{<<"200">>, _}, _, Body2, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet_list/" ++ RootHash ++ "/" ++ ar_util:encode(Cursor) }), ?assertEqual(#{ @@ -439,10 +439,10 @@ test_get_wallet_list_in_chunks({B0, {_, Pub1}, {_, Pub2}, {_, StaticPub}}) -> %% @doc Test that heights are returned correctly. test_get_height(_) -> - 0 = ar_http_iface_client:get_height(ar_test_node:peer_ip(main)), + 0 = ar_http_iface_client:get_height(ar_test_node:peer_addr(main)), ar_test_node:mine(), wait_until_height(1), - 1 = ar_http_iface_client:get_height(ar_test_node:peer_ip(main)). + 1 = ar_http_iface_client:get_height(ar_test_node:peer_addr(main)). %% @doc Test that last tx associated with a wallet can be fetched. test_get_last_tx_single({_, _, _, {_, StaticPub}}) -> @@ -450,7 +450,7 @@ test_get_last_tx_single({_, _, _, {_, StaticPub}}) -> {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet/" ++ Addr ++ "/last_tx" }), ?assertEqual(<<"TEST_ID">>, ar_util:decode(Body)). @@ -458,20 +458,20 @@ test_get_last_tx_single({_, _, _, {_, StaticPub}}) -> %% @doc Ensure that blocks can be received via a hash. 
test_get_block_by_hash({B0, _, _, _}) -> {_Peer, B1, _Time, _Size} = ar_http_iface_client:get_block_shadow(B0#block.indep_hash, - ar_test_node:peer_ip(main), binary, #{}), + ar_test_node:peer_addr(main), binary, #{}), TXIDs = [TX#tx.id || TX <- B0#block.txs], ?assertEqual(B0#block{ size_tagged_txs = unset, account_tree = undefined, txs = TXIDs, reward_history = [], block_time_history = [] }, B1). %% @doc Ensure that blocks can be received via a height. test_get_block_by_height({B0, _, _, _}) -> - {_Peer, B1, _Time, _Size} = ar_http_iface_client:get_block_shadow(0, ar_test_node:peer_ip(main), binary, #{}), + {_Peer, B1, _Time, _Size} = ar_http_iface_client:get_block_shadow(0, ar_test_node:peer_addr(main), binary, #{}), TXIDs = [TX#tx.id || TX <- B0#block.txs], ?assertEqual(B0#block{ size_tagged_txs = unset, account_tree = undefined, txs = TXIDs, reward_history = [], block_time_history = [] }, B1). test_get_current_block({B0, _, _, _}) -> - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {ok, BI} = ar_http_iface_client:get_block_index(Peer, 0, 100), {_Peer, B1, _Time, _Size} = ar_http_iface_client:get_block_shadow(hd(BI), Peer, binary, #{}), @@ -479,7 +479,7 @@ test_get_current_block({B0, _, _, _}) -> ?assertEqual(B0#block{ size_tagged_txs = unset, txs = TXIDs, reward_history = [], block_time_history = [], account_tree = undefined }, B1), {ok, {{<<"200">>, _}, _, Body, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), path => "/block/current" }), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block/current" }), {JSONStruct} = jiffy:decode(Body), ?assertEqual(ar_util:encode(B0#block.indep_hash), proplists:get_value(<<"indep_hash">>, JSONStruct)). @@ -488,24 +488,24 @@ test_get_current_block({B0, _, _, _}) -> %% correctly if the block cannot be found. 
test_get_non_existent_block(_) -> {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), path => "/block/height/100" }), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block/height/100" }), {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), path => "/block2/height/100" }), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block2/height/100" }), {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), path => "/block/hash/abcd" }), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block/hash/abcd" }), {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), path => "/block2/hash/abcd" }), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block2/hash/abcd" }), {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block/height/101/wallet_list" }), {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block/hash/abcd/wallet_list" }), {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block/height/101/hash_list" }), {ok, {{<<"404">>, _}, _, _, _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/block/hash/abcd/hash_list" }). %% @doc A test for retrieving format=2 transactions from HTTP API. 
@@ -520,19 +520,19 @@ test_get_format_2_tx(_) -> EncodedTXID = binary_to_list(ar_util:encode(TXID)), EncodedInvalidTXID = binary_to_list(ar_util:encode(InvalidTXID)), EncodedEmptyTXID = binary_to_list(ar_util:encode(EmptyTXID)), - ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), ValidTX#tx.id, + ar_http_iface_client:send_tx_json(ar_test_node:peer_addr(main), ValidTX#tx.id, ar_serialize:jsonify(ar_serialize:tx_to_json_struct(ValidTX))), {ok, {{<<"400">>, _}, _, <<"The attached data is split in an unknown way.">>, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx", body => ar_serialize:jsonify(ar_serialize:tx_to_json_struct(InvalidDataRootTX)) }), - ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), + ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), InvalidDataRootTX#tx.id, ar_serialize:tx_to_binary(InvalidDataRootTX#tx{ data = <<>> })), - ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), EmptyTX#tx.id, + ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), EmptyTX#tx.id, ar_serialize:tx_to_binary(EmptyTX)), wait_until_receives_txs([ValidTX, EmptyTX, InvalidDataRootTX]), ar_test_node:mine(), @@ -542,7 +542,7 @@ test_get_format_2_tx(_) -> {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ EncodedTXID }), ?assertEqual(ValidTX#tx{ @@ -555,21 +555,21 @@ test_get_format_2_tx(_) -> {ok, {{<<"404">>, _}, _, _, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ EncodedInvalidTXID ++ "/data" }), %% Ensure /tx/[ID]/data works for format=2 transactions when the data is empty. {ok, {{<<"200">>, _}, _, <<>>, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ EncodedEmptyTXID ++ "/data" }), %% Ensure data can be fetched for format=2 transactions via /tx/[ID]/data.html. 
{ok, {{<<"200">>, _}, Headers, HTMLData, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ EncodedTXID ++ "/data.html" }), ?assertEqual(<<"DATA">>, HTMLData), @@ -582,7 +582,7 @@ test_get_format_1_tx(_) -> LocalHeight = ar_node:get_height(), TX = #tx{ id = TXID } = ar_tx:new(<<"DATA">>), EncodedTXID = binary_to_list(ar_util:encode(TXID)), - ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id, + ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), TX#tx.id, ar_serialize:tx_to_binary(TX)), wait_until_receives_txs([TX]), ar_test_node:mine(), @@ -592,7 +592,7 @@ test_get_format_1_tx(_) -> fun() -> case ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ EncodedTXID }) of {ok, {{<<"404">>, _}, _, _, _, _}} -> @@ -618,7 +618,7 @@ test_add_external_tx_with_tags(_) -> {<<"TEST_TAG2">>, <<"TEST_VAL2">>} ] }, - ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), TaggedTX#tx.id, + ar_http_iface_client:send_tx_json(ar_test_node:peer_addr(main), TaggedTX#tx.id, ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TaggedTX))), wait_until_receives_txs([TaggedTX]), ar_test_node:mine(), @@ -633,7 +633,7 @@ test_add_external_tx_with_tags(_) -> test_find_external_tx(_) -> LocalHeight = ar_node:get_height(), TX = ar_tx:new(<<"DATA">>), - ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id, + ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), TX#tx.id, ar_serialize:tx_to_binary(TX)), wait_until_receives_txs([TX]), ar_test_node:mine(), @@ -641,7 +641,7 @@ test_find_external_tx(_) -> {ok, FoundTXID} = ar_util:do_until( fun() -> - case ar_http_iface_client:get_tx([ar_test_node:peer_ip(main)], TX#tx.id) of + case ar_http_iface_client:get_tx([ar_test_node:peer_addr(main)], TX#tx.id) of not_found -> false; TX -> @@ -664,7 +664,7 @@ test_add_tx_and_get_last({_B0, Wallet1, Wallet2, _StaticWallet}) -> quantity => ?AR(2), reward => ?AR(1)}), ID = SignedTX#tx.id, - ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), SignedTX#tx.id, + ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), SignedTX#tx.id, ar_serialize:tx_to_binary(SignedTX)), wait_until_receives_txs([SignedTX]), ar_test_node:mine(), @@ -672,7 +672,7 @@ test_add_tx_and_get_last({_B0, Wallet1, Wallet2, _StaticWallet}) -> {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet/" ++ binary_to_list(ar_util:encode(ar_wallet:to_address(Pub1))) ++ "/last_tx" @@ -683,7 +683,7 @@ test_add_tx_and_get_last({_B0, Wallet1, Wallet2, _StaticWallet}) -> test_get_subfields_of_tx(_) -> LocalHeight = ar_node:get_height(), TX = ar_tx:new(<<"DATA">>), - ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id, + ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), TX#tx.id, ar_serialize:tx_to_binary(TX)), wait_until_receives_txs([TX]), ar_test_node:mine(), @@ -695,13 +695,13 @@ test_get_subfields_of_tx(_) -> %% @doc Correctly check the status of pending is returned for a pending transaction test_get_pending_tx(_) -> TX = ar_tx:new(<<"DATA1">>), - ar_http_iface_client:send_tx_json(ar_test_node:peer_ip(main), TX#tx.id, + ar_http_iface_client:send_tx_json(ar_test_node:peer_addr(main), TX#tx.id, ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TX))), wait_until_receives_txs([TX]), {ok, 
{{<<"202">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ binary_to_list(ar_util:encode(TX#tx.id)) }), ?assertEqual(<<"Pending">>, Body). @@ -727,7 +727,7 @@ test_get_tx_status(_) -> FetchStatus = fun() -> ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ binary_to_list(ar_util:encode(TX#tx.id)) ++ "/status" }) end, @@ -777,7 +777,7 @@ test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) -> {ok, {{<<"421">>, _}, _, _, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet" }), {ok, Config} = application:get_env(arweave, config), @@ -786,14 +786,14 @@ test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) -> {ok, {{<<"421">>, _}, _, _, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet", headers => [{<<"X-Internal-Api-Secret">>, <<"incorrect_secret">>}] }), {ok, {{<<"200">>, <<"OK">>}, _, CreateWalletBody, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/wallet", headers => [{<<"X-Internal-Api-Secret">>, <<"correct_secret">>}] }), @@ -811,7 +811,7 @@ test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) -> {ok, {{<<"200">>, _}, _, _, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx", body => ar_serialize:jsonify(ar_serialize:tx_to_json_struct(TopUpTX)) }), @@ -832,7 +832,7 @@ test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) -> {ok, {{<<"421">>, _}, _, _, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/unsigned_tx", body => ar_serialize:jsonify({UnsignedTXProps}) }), @@ -841,7 +841,7 @@ test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) -> {ok, {{<<"421">>, _}, _, _, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/unsigned_tx", headers => [{<<"X-Internal-Api-Secret">>, <<"incorrect_secret">>}], body => ar_serialize:jsonify({UnsignedTXProps}) @@ -849,7 +849,7 @@ test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) -> {ok, {{<<"200">>, <<"OK">>}, _, Body, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/unsigned_tx", headers => [{<<"X-Internal-Api-Secret">>, <<"correct_secret">>}], body => ar_serialize:jsonify({UnsignedTXProps}) @@ -863,7 +863,7 @@ test_post_unsigned_tx({_B0, Wallet1, _Wallet2, _StaticWallet}) -> {ok, {_, _, GetTXBody, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ binary_to_list(TXID) ++ "/status" }), {GetTXRes} = ar_serialize:dejsonify(GetTXBody), @@ -880,7 +880,7 @@ test_get_error_of_data_limit(_) -> LocalHeight = ar_node:get_height(), Limit = 1460, TX = ar_tx:new(<< <<0>> || _ <- lists:seq(1, Limit * 2) >>), - ar_http_iface_client:send_tx_binary(ar_test_node:peer_ip(main), TX#tx.id, + ar_http_iface_client:send_tx_binary(ar_test_node:peer_addr(main), TX#tx.id, ar_serialize:tx_to_binary(TX)), wait_until_receives_txs([TX]), ar_test_node:mine(), @@ -889,7 +889,7 @@ test_get_error_of_data_limit(_) -> 
Resp = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ binary_to_list(ar_util:encode(TX#tx.id)) ++ "/data", limit => Limit }), @@ -933,23 +933,23 @@ test_get_recent_hash_list_diff({_B0, Wallet1, _Wallet2, _StaticWallet}) -> BTip = ar_node:get_current_block(), ar_test_node:disconnect_from(peer1), {ok, {{<<"404">>, _}, _, <<>>, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/recent_hash_list_diff", + peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => <<>> }), {ok, {{<<"400">>, _}, _, <<>>, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/recent_hash_list_diff", + peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => crypto:strong_rand_bytes(47) }), {ok, {{<<"404">>, _}, _, <<>>, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/recent_hash_list_diff", + peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => crypto:strong_rand_bytes(48) }), B0H = BTip#block.indep_hash, {ok, {{<<"200">>, _}, _, B0H, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/recent_hash_list_diff", + peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => B0H }), ar_test_node:mine(), BI1 = wait_until_height(LocalHeight + 1), {B1H, _, _} = hd(BI1), {ok, {{<<"200">>, _}, _, << B0H:48/binary, B1H:48/binary, 0:16 >> , _, _}} = - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => B0H }), TXs = [ar_test_node:sign_tx(main, Wallet1, #{ last_tx => ar_test_node:get_tx_anchor(peer1) }) || _ <- lists:seq(1, 3)], lists:foreach(fun(TX) -> ar_test_node:assert_post_tx_to_peer(main, TX) end, TXs), @@ -959,16 +959,16 @@ test_get_recent_hash_list_diff({_B0, Wallet1, _Wallet2, _StaticWallet}) -> [TXID1, TXID2, TXID3] = [TX#tx.id || TX <- (ar_node:get_current_block())#block.txs], {ok, {{<<"200">>, _}, _, << B0H:48/binary, B1H:48/binary, 0:16, B2H:48/binary, 3:16, TXID1:32/binary, TXID2:32/binary, TXID3/binary >> , _, _}} - = ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + = ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => B0H }), {ok, {{<<"200">>, _}, _, << B0H:48/binary, B1H:48/binary, 0:16, B2H:48/binary, 3:16, TXID1:32/binary, TXID2:32/binary, TXID3/binary >> , _, _}} - = ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + = ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => << B0H/binary, (crypto:strong_rand_bytes(48))/binary >>}), {ok, {{<<"200">>, _}, _, << B1H:48/binary, B2H:48/binary, 3:16, TXID1:32/binary, TXID2:32/binary, TXID3/binary >> , _, _}} - = ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), + = ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/recent_hash_list_diff", headers => [], body => << B0H/binary, B1H/binary, (crypto:strong_rand_bytes(48))/binary >>}). 
@@ -986,14 +986,14 @@ test_get_total_supply(_Args) -> ), TotalSupplyBin = integer_to_binary(TotalSupply), ?assertMatch({ok, {{<<"200">>, _}, _, TotalSupplyBin, _, _}}, - ar_http:req(#{ method => get, peer => ar_test_node:peer_ip(main), path => "/total_supply" })). + ar_http:req(#{ method => get, peer => ar_test_node:peer_addr(main), path => "/total_supply" })). wait_until_syncs_tx_data(TXID) -> ar_util:do_until( fun() -> case ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), + peer => ar_test_node:peer_addr(main), path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/data" }) of {ok, {{<<"404">>, _}, _, _, _, _}} -> diff --git a/apps/arweave/test/ar_info_tests.erl b/apps/arweave/test/ar_info_tests.erl index 4e7214f1e..f9bf27990 100644 --- a/apps/arweave/test/ar_info_tests.erl +++ b/apps/arweave/test/ar_info_tests.erl @@ -33,7 +33,7 @@ test_recent_blocks(Type) -> <<"received">> => <<"pending">>, <<"height">> => 0 }], - ?assertEqual(GenesisBlock, get_recent(ar_test_node:peer_ip(peer1), blocks)), + ?assertEqual(GenesisBlock, get_recent(ar_test_node:peer_addr(peer1), blocks)), TargetHeight = ?CHECKPOINT_DEPTH+2, PeerBI = lists:foldl( @@ -46,8 +46,8 @@ test_recent_blocks(Type) -> ), %% Peer1 recent has no timestamps since it hasn't received any of its own blocks %% gossipped back - ?assertEqual(expected_blocks(peer1, PeerBI, true), - get_recent(ar_test_node:peer_ip(peer1), blocks)), + ?assertEqual(expected_blocks(peer1, PeerBI, true), + get_recent(ar_test_node:peer_addr(peer1), blocks)), %% Share blocks to peer1 lists:foreach( @@ -56,7 +56,7 @@ test_recent_blocks(Type) -> B = ar_test_node:remote_call(peer1, ar_block_cache, get, [block_cache, H]), case Type of post -> - ar_test_node:send_new_block(ar_test_node:peer_ip(peer1), B); + ar_test_node:send_new_block(ar_test_node:peer_addr(peer1), B); announcement -> Announcement = #block_announcement{ indep_hash = H, previous_block = B#block.previous_block, @@ -65,7 +65,7 @@ test_recent_blocks(Type) -> solution_hash = B#block.hash, tx_prefixes = [] }, ar_http_iface_client:send_block_announcement( - ar_test_node:peer_ip(peer1), Announcement) + ar_test_node:peer_addr(peer1), Announcement) end end, %% Reverse the list so that the peer receives the blocks in the same order they @@ -75,9 +75,9 @@ test_recent_blocks(Type) -> %% Peer1 recent should now have timestamps, but also black out the most recent %% ones. - ?assertEqual(expected_blocks(peer1, PeerBI), - get_recent(ar_test_node:peer_ip(peer1), blocks)). - + ?assertEqual(expected_blocks(peer1, PeerBI), + get_recent(ar_test_node:peer_addr(peer1), blocks)). + expected_blocks(Node, BI) -> expected_blocks(Node, BI, false). expected_blocks(Node, BI, ForcePending) -> @@ -179,7 +179,7 @@ test_recent_forks() -> ar_test_node:start_peer(peer2, B0), ar_test_node:connect_to_peer(peer1), ar_test_node:connect_to_peer(peer2), - + %% Mine a few blocks, shared by both peers ar_test_node:mine(peer1), ar_test_node:wait_until_height(peer1, 1), @@ -203,7 +203,7 @@ test_recent_forks() -> height = 4, block_ids = Orphans1 }, - + ar_test_node:mine(peer2), ar_test_node:wait_until_height(peer2, 4), ar_test_node:mine(peer2), @@ -252,23 +252,23 @@ test_recent_forks() -> ar_test_node:disconnect_from(peer1), ar_test_node:disconnect_from(peer2), - assert_forks_json_equal([Fork2, Fork1], get_recent(ar_test_node:peer_ip(peer1), forks)), + assert_forks_json_equal([Fork2, Fork1], get_recent(ar_test_node:peer_addr(peer1), forks)), ok. 
assert_forks_json_equal(ExpectedForks) -> - assert_forks_json_equal(ExpectedForks, get_recent(ar_test_node:peer_ip(main), forks)). + assert_forks_json_equal(ExpectedForks, get_recent(ar_test_node:peer_addr(main), forks)). assert_forks_json_equal(ExpectedForks, ActualForks) -> - ExpectedForksStripped = [ + ExpectedForksStripped = [ #{ <<"id">> => ar_util:encode(Fork#fork.id), <<"height">> => Fork#fork.height, <<"blocks">> => [ ar_util:encode(BlockID) || BlockID <- Fork#fork.block_ids ] - } + } || Fork <- ExpectedForks], ActualForksStripped = [ maps:remove(<<"timestamp">>, Fork) || Fork <- ActualForks ], ?assertEqual(ExpectedForksStripped, ActualForksStripped). - + get_recent(Peer, Type) -> case get_recent(Peer) of info_unavailable -> info_unavailable; @@ -285,7 +285,7 @@ get_recent(Peer) -> timeout => 2 * 1000 }) of - {ok, {{<<"200">>, _}, _, JSON, _, _}} -> + {ok, {{<<"200">>, _}, _, JSON, _, _}} -> case ar_serialize:json_decode(JSON, [return_maps]) of {ok, JsonMap} -> JsonMap; @@ -293,4 +293,4 @@ get_recent(Peer) -> info_unavailable end; _ -> info_unavailable - end. \ No newline at end of file + end. diff --git a/apps/arweave/test/ar_multiple_txs_per_wallet_tests.erl b/apps/arweave/test/ar_multiple_txs_per_wallet_tests.erl index c56f59049..cab7bccb5 100644 --- a/apps/arweave/test/ar_multiple_txs_per_wallet_tests.erl +++ b/apps/arweave/test/ar_multiple_txs_per_wallet_tests.erl @@ -246,7 +246,7 @@ returns_error_when_txs_exceed_balance(B0, TXs) -> {ok, {{<<"400">>, _}, _, Body, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), + peer => ar_test_node:peer_addr(peer1), path => "/tx", body => ar_serialize:jsonify(ar_serialize:tx_to_json_struct(ExceedBalanceTX)) }), @@ -396,7 +396,7 @@ test_does_not_allow_to_replay_empty_wallet_txs() -> {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(peer1), + peer => ar_test_node:peer_addr(peer1), path => "/wallet/" ++ GetBalancePath ++ "/balance" }), Balance = binary_to_integer(Body), @@ -408,7 +408,7 @@ test_does_not_allow_to_replay_empty_wallet_txs() -> {ok, {{<<"200">>, _}, _, Body2, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(peer1), + peer => ar_test_node:peer_addr(peer1), path => "/wallet/" ++ GetBalancePath ++ "/balance" }), ?assertEqual(0, binary_to_integer(Body2)), @@ -540,13 +540,13 @@ test_drops_v1_txs_exceeding_mempool_limit() -> end, lists:sublist(TXs, 5) ), - {ok, Mempool1} = ar_http_iface_client:get_mempool(ar_test_node:peer_ip(peer1)), + {ok, Mempool1} = ar_http_iface_client:get_mempool(ar_test_node:peer_addr(peer1)), %% The transactions have the same utility therefore they are sorted in the %% order of submission. ?assertEqual([TX#tx.id || TX <- lists:sublist(TXs, 5)], Mempool1), Last = lists:last(TXs), {ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} = ar_test_node:post_tx_to_peer(peer1, Last, false), - {ok, Mempool2} = ar_http_iface_client:get_mempool(ar_test_node:peer_ip(peer1)), + {ok, Mempool2} = ar_http_iface_client:get_mempool(ar_test_node:peer_addr(peer1)), %% There is no place for the last transaction in the mempool. ?assertEqual([TX#tx.id || TX <- lists:sublist(TXs, 5)], Mempool2). @@ -575,13 +575,13 @@ drops_v2_txs_exceeding_mempool_limit() -> end, lists:sublist(TXs, 10) ), - {ok, Mempool1} = ar_http_iface_client:get_mempool(ar_test_node:peer_ip(peer1)), + {ok, Mempool1} = ar_http_iface_client:get_mempool(ar_test_node:peer_addr(peer1)), %% The transactions have the same utility therefore they are sorted in the %% order of submission. 
?assertEqual([TX#tx.id || TX <- lists:sublist(TXs, 10)], Mempool1), Last = lists:last(TXs), {ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} = ar_test_node:post_tx_to_peer(peer1, Last, false), - {ok, Mempool2} = ar_http_iface_client:get_mempool(ar_test_node:peer_ip(peer1)), + {ok, Mempool2} = ar_http_iface_client:get_mempool(ar_test_node:peer_addr(peer1)), %% The last TX is twice as big and twice as valuable so it replaces two %% other transactions in the memory pool. ?assertEqual([Last#tx.id | [TX#tx.id || TX <- lists:sublist(TXs, 8)]], Mempool2), @@ -589,7 +589,7 @@ drops_v2_txs_exceeding_mempool_limit() -> StrippedTX = ar_test_node:sign_tx(Key, #{ last_tx => B0#block.indep_hash, data => BigChunk, tags => [{<<"nonce">>, integer_to_binary(12)}] }), ar_test_node:assert_post_tx_to_peer(peer1, StrippedTX#tx{ data = <<>> }), - {ok, Mempool3} = ar_http_iface_client:get_mempool(ar_test_node:peer_ip(peer1)), + {ok, Mempool3} = ar_http_iface_client:get_mempool(ar_test_node:peer_addr(peer1)), ?assertEqual([Last#tx.id] ++ [TX#tx.id || TX <- lists:sublist(TXs, 8)] ++ [StrippedTX#tx.id], Mempool3). diff --git a/apps/arweave/test/ar_node_tests.erl b/apps/arweave/test/ar_node_tests.erl index fb7ee08ff..8c94c9ef2 100644 --- a/apps/arweave/test/ar_node_tests.erl +++ b/apps/arweave/test/ar_node_tests.erl @@ -188,7 +188,7 @@ test_persisted_mempool() -> %% Expect the pending transactions to be picked up and distributed. ok = application:set_env(arweave, config, Config#config{ start_from_latest_state = false, - peers = [ar_test_node:peer_ip(peer1)] + peers = [ar_test_node:peer_addr(peer1)] }), ar:start_dependencies(), ar_test_node:wait_until_joined(), diff --git a/apps/arweave/test/ar_p3_tests.erl b/apps/arweave/test/ar_p3_tests.erl index ff38babbd..a4d2f99f9 100644 --- a/apps/arweave/test/ar_p3_tests.erl +++ b/apps/arweave/test/ar_p3_tests.erl @@ -40,7 +40,7 @@ test_not_found() -> ar_p3:handle_call({allow_request, raw_request(<<"GET">>, <<"/info">>)}, [],Config)), ?assertEqual( {reply, {true, not_p3_service}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, raw_request(<<"GET">>, <<"/invalid_endpoint">>)}, [], Config)). 
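The drops_v1_txs_exceeding_mempool_limit and drops_v2_txs_exceeding_mempool_limit tests above rely on the node's size-limited, utility-ordered mempool eviction. The snippet below is only a toy model of that behaviour (the real logic lives in the node, not in this patch), assuming higher utility ranks first, equal utility keeps submission order, and entries are kept in rank order until the byte limit is reached. Under that model, ten unit-size entries plus a newcomer of twice the size and twice the utility yield exactly the [Last | first eight] ordering asserted above, while an equal-utility newcomer is rejected.

%% Entries are {SubmissionSeq, Utility, Size, TXID}.
keep_within_limit(Entries, ByteLimit) ->
	Ranked = lists:sort(
		fun({Seq1, U1, _, _}, {Seq2, U2, _, _}) -> {-U1, Seq1} =< {-U2, Seq2} end,
		Entries),
	take_in_rank_order(Ranked, ByteLimit, 0, []).

take_in_rank_order([], _Limit, _Used, Acc) ->
	lists:reverse(Acc);
take_in_rank_order([{_, _, Size, _} = E | Rest], Limit, Used, Acc) when Used + Size =< Limit ->
	take_in_rank_order(Rest, Limit, Used + Size, [E | Acc]);
take_in_rank_order(_Overflow, _Limit, _Used, Acc) ->
	%% The first entry that no longer fits, and everything ranked below it, is dropped.
	lists:reverse(Acc).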
test_valid_request() -> @@ -58,7 +58,7 @@ test_valid_request() -> crypto:strong_rand_bytes(32) ), Config = sample_p3_config(), - {_, {_, Transaction1}, _} = Result1 = ar_p3:handle_call({allow_request, + {_, {_, Transaction1}, _} = Result1 = ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -70,8 +70,8 @@ test_valid_request() -> Result1, "Valid 'modSeq' header"), ?assertEqual(<<"GET /price/1000">>, Transaction1#p3_transaction.description), - - {_, {_, Transaction2}, _} = Result2 = ar_p3:handle_call({allow_request, + + {_, {_, Transaction2}, _} = Result2 = ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -93,7 +93,7 @@ test_zero_rate() -> ?ARWEAVE_AR ), ZeroRateConfig = sample_p3_config(crypto:strong_rand_bytes(32), 0, 2, 0), - {_, {_, Transaction1}, _} = Result1 = ar_p3:handle_call({allow_request, + {_, {_, Transaction1}, _} = Result1 = ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -107,7 +107,7 @@ test_zero_rate() -> ?assertEqual(<<"GET /price/1000">>, Transaction1#p3_transaction.description), ?assertEqual( {reply, {false, invalid_header}, ZeroRateConfig}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, raw_request(<<"GET">>, <<"/price/1000">>)}, [], ZeroRateConfig), "Unsigned request should fail"). @@ -128,7 +128,7 @@ test_checksum_request() -> Config = sample_p3_config(), ?assertEqual( {reply, {false, insufficient_funds}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -137,7 +137,7 @@ test_checksum_request() -> "Valid checksum"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -165,7 +165,7 @@ test_bad_headers() -> Config = sample_p3_config(), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -175,7 +175,7 @@ test_bad_headers() -> "Empty 'modSeq' header"), ?assertEqual( {reply, {false, stale_mod_seq}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -185,7 +185,7 @@ test_bad_headers() -> "Bad 'modSeq' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ADDRESS_HEADER => EncodedAddress @@ -193,7 +193,7 @@ test_bad_headers() -> "Missing 'endpoint' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<>>, @@ -202,7 +202,7 @@ test_bad_headers() -> "Empty 'endpoint' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ 
?P3_ENDPOINT_HEADER => <<"/chunk/{offset}">>, @@ -211,7 +211,7 @@ test_bad_headers() -> "Bad 'endpoint' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => "/price/{bytes}", @@ -220,7 +220,7 @@ test_bad_headers() -> "Bad 'endpoint' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">> @@ -228,7 +228,7 @@ test_bad_headers() -> "Missing 'address' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -237,7 +237,7 @@ test_bad_headers() -> "Empty 'address' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -246,7 +246,7 @@ test_bad_headers() -> "Decoded 'address' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -255,7 +255,7 @@ test_bad_headers() -> "Wrong 'address' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -265,7 +265,7 @@ test_bad_headers() -> "Mismatch 'price' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, raw_request(<<"GET">>, <<"/price/1000">>, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -274,7 +274,7 @@ test_bad_headers() -> "Missing 'signature' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, raw_request(<<"GET">>, <<"/price/1000">>, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -284,7 +284,7 @@ test_bad_headers() -> "Empty 'signature' header"), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, raw_request(<<"GET">>, <<"/price/1000">>, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -301,7 +301,7 @@ test_bad_headers() -> ValidHeaders = maps:get(headers, ValidRequest), ?assertEqual( {reply, {false, invalid_header}, Config}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, ValidRequest#{ headers => ValidHeaders#{ ?P3_SIGNATURE_HEADER => ar_util:decode(maps:get(?P3_SIGNATURE_HEADER, ValidHeaders)) @@ -324,7 +324,7 @@ test_bad_config() -> NoPaymentsConfig = Config#p3_config{ payments = #{} }, ?assertEqual( {reply, {false, invalid_header}, NoPaymentsConfig}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -341,7 +341,7 @@ test_bad_config() -> } }, ?assertEqual( {reply, {false, invalid_header}, MismatchedPaymentsConfig}, - ar_p3:handle_call({allow_request, + 
ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -358,7 +358,7 @@ test_bad_config() -> } } }, ?assertEqual( {reply, {false, invalid_header}, NoRateConfig}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -375,7 +375,7 @@ test_bad_config() -> } } }, ?assertEqual( {reply, {false, invalid_header}, MismatchRateConfig}, - ar_p3:handle_call({allow_request, + ar_p3:handle_call({allow_request, signed_request(<<"GET">>, <<"/price/1000">>, PrivKey, #{ ?P3_ENDPOINT_HEADER => <<"/price/{bytes}">>, @@ -576,7 +576,7 @@ e2e_deposit_before_charge() -> "No balance change expected"), ?assertMatch( - {ok, {{<<"400">>, <<"Bad Request">>}, _, + {ok, {{<<"400">>, <<"Bad Request">>}, _, <<"{\"error\":\"size_must_be_an_integer\"}">>, _, _}}, http_request( signed_request(<<"GET">>, <<"/price/abc">>, Priv1, @@ -721,7 +721,7 @@ e2e_charge_before_deposit() -> TX1 = ar_test_node:sign_tx(Wallet1, #{ target => Address2, quantity => 10 }), ar_test_node:assert_post_tx_to_peer(main, TX1), - + ar_test_node:mine(), wait_until_height(1), @@ -745,7 +745,7 @@ e2e_charge_before_deposit() -> TX2 = ar_test_node:sign_tx(Wallet1, #{ target => DepositAddress, quantity => 1200 }), ar_test_node:assert_post_tx_to_peer(main, TX2), - + ar_test_node:mine(), wait_until_height(3), @@ -841,12 +841,12 @@ e2e_restart_p3_service() -> ar_test_node:rejoin_on(#{ node => main, join_on => peer1 }), ?assertEqual(5, ar_p3_db:get_scan_height(), "Restarting node should not have reset scan height db: scan height 5"), - + ok = application:set_env(arweave, config, BaseConfig). %% @doc Test that a bunch of concurrent requests don't overspend the P3 account and that they %% are gated before they are processed (i.e. if the account does not have sufficient balance, -%% the request is not processed at all) +%% the request is not processed at all) e2e_concurrent_requests() -> Wallet1 = {Priv1, Pub1} = ar_wallet:new(), {_, Pub3} = ar_wallet:new(), @@ -867,7 +867,7 @@ e2e_concurrent_requests() -> %% Post a 100 winston deposit and wait for it to be picked up. TX1 = ar_test_node:sign_tx(Wallet1, #{ target => DepositAddress, quantity => 100 }), ar_test_node:assert_post_tx_to_peer(main, TX1), - + ar_test_node:mine(), wait_until_height(1), @@ -972,7 +972,7 @@ get_balance2(EncodedAddress, Network, Token) -> ), {Status, Balance}. -signed_request(Method, Path, PrivKey, Headers) +signed_request(Method, Path, PrivKey, Headers) when is_map(Headers) -> Message = build_message(Headers), EncodedSignature = ar_util:encode(ar_wallet:sign(PrivKey, Message)), @@ -984,14 +984,14 @@ raw_request(Method, Path) -> raw_request(Method, Path, #{}). raw_request(Method, Path, Headers) when is_bitstring(Method) and is_bitstring(Path) and is_map(Headers) -> - #{ + #{ method => Method, path => Path, headers => Headers }. http_request(#{method := M, path := P, headers := H}) -> - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {_, _, _, _, Port} = Peer, Method = case M of <<"GET">> -> get; @@ -1010,4 +1010,4 @@ http_request(#{method := M, path := P, headers := H}) -> %%% XXX TO TEST: %%% - rescanning on ndoe restart %%% - what if the deposit address changes? 
-%%% - same deposit twice (maybe use txid as db key) \ No newline at end of file +%%% - same deposit twice (maybe use txid as db key) diff --git a/apps/arweave/test/ar_post_block_tests.erl b/apps/arweave/test/ar_post_block_tests.erl index ab306b4bd..6dfb65eb3 100644 --- a/apps/arweave/test/ar_post_block_tests.erl +++ b/apps/arweave/test/ar_post_block_tests.erl @@ -97,22 +97,22 @@ post_2_8_test_() -> test_mitm_poa_chunk_tamper_warn({_Key, B, _PrevB}) -> %% Verify that, in 2.7, we don't ban a peer if the poa.chunk is tampered with. ok = ar_events:subscribe(block), - assert_not_banned(ar_test_node:peer_ip(main)), + assert_not_banned(ar_test_node:peer_addr(main)), B2 = B#block{ poa = #poa{ chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE) } }, post_block(B2, invalid_first_chunk), - assert_not_banned(ar_test_node:peer_ip(main)). + assert_not_banned(ar_test_node:peer_addr(main)). test_mitm_poa2_chunk_tamper_warn({Key, B, PrevB}) -> %% Verify that, in 2.7, we don't ban a peer if the poa2.chunk is tampered with. %% For this test we have to re-sign the block with the new poa2.chunk - but that's just a %% test limitation. In the wild the poa2 chunk could be modified without resigning. ok = ar_events:subscribe(block), - assert_not_banned(ar_test_node:peer_ip(main)), - B2 = sign_block(B#block{ + assert_not_banned(ar_test_node:peer_addr(main)), + B2 = sign_block(B#block{ recall_byte2 = 100000000, poa2 = #poa{ chunk = crypto:strong_rand_bytes(?DATA_CHUNK_SIZE) } }, PrevB, Key), post_block(B2, invalid_second_chunk), - assert_not_banned(ar_test_node:peer_ip(main)). + assert_not_banned(ar_test_node:peer_addr(main)). test_reject_block_invalid_proof_size({Key, B, PrevB}) -> ok = ar_events:subscribe(block), @@ -246,11 +246,11 @@ test_reject_block_invalid_wallet_list({Key, B, PrevB}) -> test_reject_block_invalid_packing_difficulty({Key, B, PrevB}) -> ok = ar_events:subscribe(block), - assert_not_banned(ar_test_node:peer_ip(main)), + assert_not_banned(ar_test_node:peer_addr(main)), B2 = sign_block(B#block{ unpacked_chunk_hash = <<>>, packing_difficulty = 33 }, PrevB, Key), post_block(B2, invalid_first_unpacked_chunk), - assert_not_banned(ar_test_node:peer_ip(main)), + assert_not_banned(ar_test_node:peer_addr(main)), C = crypto:strong_rand_bytes(262144), PackedC = crypto:strong_rand_bytes(262144 div 32), UH = crypto:hash(sha256, C), @@ -260,7 +260,7 @@ test_reject_block_invalid_packing_difficulty({Key, B, PrevB}) -> poa = PoA#poa{ unpacked_chunk = C, chunk = PackedC }, unpacked_chunk_hash = UH, chunk_hash = H }, PrevB, Key), post_block(B3, invalid_packing_difficulty), - assert_banned(ar_test_node:peer_ip(main)). + assert_banned(ar_test_node:peer_addr(main)). %% ------------------------------------------------------------------------------------------ %% Others tests @@ -315,7 +315,7 @@ test_rejects_invalid_blocks() -> %% The valid block with the ID from the failed attempt can still go through. post_block(B1, valid), %% Try to post the same block again. - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), ?assertMatch({ok, {{<<"208">>, _}, _, _, _, _}}, send_new_block(Peer, B1)), %% Correct hash, but invalid signature. 
B2Preimage = B1#block{ signature = <<>> }, @@ -580,14 +580,14 @@ test_send_block2() -> previous_block = B0#block.indep_hash, tx_prefixes = [binary:part(TX#tx.id, 0, 8) || TX <- TXs2] }, {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block_announcement", + peer => ar_test_node:peer_addr(peer1), path => "/block_announcement", body => ar_serialize:block_announcement_to_binary(Announcement) }), Response = ar_serialize:binary_to_block_announcement_response(Body), ?assertEqual({ok, #block_announcement_response{ missing_chunk = true, missing_tx_indices = [0, 2, 4, 6, 8] }}, Response), Announcement2 = Announcement#block_announcement{ recall_byte = 0 }, {ok, {{<<"200">>, _}, _, Body2, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block_announcement", + peer => ar_test_node:peer_addr(peer1), path => "/block_announcement", body => ar_serialize:block_announcement_to_binary(Announcement2) }), Response2 = ar_serialize:binary_to_block_announcement_response(Body2), %% We always report missing chunk currently. @@ -595,16 +595,16 @@ test_send_block2() -> missing_tx_indices = [0, 2, 4, 6, 8] }}, Response2), Announcement3 = Announcement#block_announcement{ recall_byte = 100000000000000 }, {ok, {{<<"200">>, _}, _, Body, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block_announcement", + peer => ar_test_node:peer_addr(peer1), path => "/block_announcement", body => ar_serialize:block_announcement_to_binary(Announcement3) }), {ok, {{<<"418">>, _}, _, Body3, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block2", + peer => ar_test_node:peer_addr(peer1), path => "/block2", body => ar_serialize:block_to_binary(B) }), ?assertEqual(iolist_to_binary(lists:foldl(fun(#tx{ id = TXID }, Acc) -> [TXID | Acc] end, [], TXs2 -- EverySecondTX)), Body3), B2 = B#block{ txs = [lists:nth(1, TXs2) | tl(B#block.txs)] }, {ok, {{<<"418">>, _}, _, Body4, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block2", + peer => ar_test_node:peer_addr(peer1), path => "/block2", body => ar_serialize:block_to_binary(B2) }), ?assertEqual(iolist_to_binary(lists:foldl(fun(#tx{ id = TXID }, Acc) -> [TXID | Acc] end, [], (TXs2 -- EverySecondTX) -- [lists:nth(1, TXs2)])), Body4), @@ -614,29 +614,29 @@ test_send_block2() -> ar_test_node:mine(), [{H2, _, _}, _, _] = wait_until_height(2), {ok, {{<<"412">>, _}, _, <<>>, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block_announcement", + peer => ar_test_node:peer_addr(peer1), path => "/block_announcement", body => ar_serialize:block_announcement_to_binary(#block_announcement{ indep_hash = H2, previous_block = B#block.indep_hash }) }), BTXs = ar_storage:read_tx(B#block.txs), B3 = B#block{ txs = BTXs }, {ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block2", + peer => ar_test_node:peer_addr(peer1), path => "/block2", body => ar_serialize:block_to_binary(B3) }), {ok, {{<<"200">>, _}, _, SerializedB, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/block2/height/1" }), + peer => ar_test_node:peer_addr(main), path => "/block2/height/1" }), ?assertEqual({ok, B}, ar_serialize:binary_to_block(SerializedB)), Map = element(2, lists:foldl(fun(TX, {N, M}) -> {N + 1, maps:put(TX#tx.id, N, M)} end, {0, #{}}, TXs2)), {ok, 
{{<<"200">>, _}, _, Serialized2B, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/block2/height/1", + peer => ar_test_node:peer_addr(main), path => "/block2/height/1", body => << 1:1, 0:(8 * 125 - 1) >> }), ?assertEqual({ok, B#block{ txs = [case maps:get(TX#tx.id, Map) == 0 of true -> TX; _ -> TX#tx.id end || TX <- BTXs] }}, ar_serialize:binary_to_block(Serialized2B)), {ok, {{<<"200">>, _}, _, Serialized2B, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/block2/height/1", + peer => ar_test_node:peer_addr(main), path => "/block2/height/1", body => << 1:1, 0:7 >> }), {ok, {{<<"200">>, _}, _, Serialized3B, _, _}} = ar_http:req(#{ method => get, - peer => ar_test_node:peer_ip(main), path => "/block2/height/1", + peer => ar_test_node:peer_addr(main), path => "/block2/height/1", body => << 0:1, 1:1, 0:1, 1:1, 0:4 >> }), ?assertEqual({ok, B#block{ txs = [case lists:member(maps:get(TX#tx.id, Map), [1, 3]) of true -> TX; _ -> TX#tx.id end || TX <- BTXs] }}, @@ -644,7 +644,7 @@ test_send_block2() -> B4 = read_block_when_stored(H2, true), timer:sleep(500), {ok, {{<<"200">>, _}, _, <<"OK">>, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block2", + peer => ar_test_node:peer_addr(peer1), path => "/block2", body => ar_serialize:block_to_binary(B4) }), ar_test_node:connect_to_peer(peer1), lists:foreach( @@ -656,7 +656,7 @@ test_send_block2() -> ), B5 = ar_storage:read_block(ar_node:get_current_block_hash()), {ok, {{<<"208">>, _}, _, _, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block_announcement", + peer => ar_test_node:peer_addr(peer1), path => "/block_announcement", body => ar_serialize:block_announcement_to_binary(#block_announcement{ indep_hash = B5#block.indep_hash, previous_block = B5#block.previous_block }) }), @@ -665,7 +665,7 @@ test_send_block2() -> [_ | _] = wait_until_height(3 + ?SEARCH_SPACE_UPPER_BOUND_DEPTH + 1), B6 = ar_storage:read_block(ar_node:get_current_block_hash()), {ok, {{<<"200">>, _}, _, Body5, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block_announcement", + peer => ar_test_node:peer_addr(peer1), path => "/block_announcement", body => ar_serialize:block_announcement_to_binary(#block_announcement{ indep_hash = B6#block.indep_hash, previous_block = B6#block.previous_block, @@ -675,7 +675,7 @@ test_send_block2() -> missing_tx_indices = [] }}, ar_serialize:binary_to_block_announcement_response(Body5)), {ok, {{<<"200">>, _}, _, Body6, _, _}} = ar_http:req(#{ method => post, - peer => ar_test_node:peer_ip(peer1), path => "/block_announcement", + peer => ar_test_node:peer_addr(peer1), path => "/block_announcement", body => ar_serialize:block_announcement_to_binary(#block_announcement{ indep_hash = B6#block.indep_hash, previous_block = B6#block.previous_block, diff --git a/apps/arweave/test/ar_pricing_tests.erl b/apps/arweave/test/ar_pricing_tests.erl index 21dff2eb4..50d8f79ff 100644 --- a/apps/arweave/test/ar_pricing_tests.erl +++ b/apps/arweave/test/ar_pricing_tests.erl @@ -107,7 +107,7 @@ test_price_per_gib_minute_transition_phases() -> "After 2.7.2 transition end"). 
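The GET /block2/height/1 requests with binary bodies in the ar_post_block_tests.erl hunks above select which transactions come back in full: bit N, counted from the most significant bit of the body, requests the full transaction at index N, while unset bits yield only the transaction identifier. A hypothetical helper, inferred from those requests rather than taken from the codebase, that builds such a selection mask:

tx_selection_mask(WantedIndices, TXCount) ->
	%% One bit per transaction index, most significant bit first.
	<< <<(case lists:member(I, WantedIndices) of true -> 1; false -> 0 end):1>>
			|| I <- lists:seq(0, TXCount - 1) >>.

%% tx_selection_mask([1, 3], 8) =:= << 0:1, 1:1, 0:1, 1:1, 0:4 >>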
test_v2_price() -> - AtTransitionEnd = ar_pricing_transition:transition_start_2_7_2() + + AtTransitionEnd = ar_pricing_transition:transition_start_2_7_2() + ar_pricing_transition:transition_length(ar_pricing_transition:transition_start_2_7_2()), %% 2 chunks per partition when running tests @@ -130,14 +130,14 @@ test_v2_price() -> do_price_per_gib_minute_post_transition(BeyondTransition, 61440, 122880, 92160). test_v2_price_with_poa1_diff_multiplier() -> - AtTransitionEnd = ar_pricing_transition:transition_start_2_7_2() + + AtTransitionEnd = ar_pricing_transition:transition_start_2_7_2() + ar_pricing_transition:transition_length(ar_pricing_transition:transition_start_2_7_2()), %% 2 chunks per partition when running tests %% If we get 1 solution per chunk (or 2 per partition), then we expect a price of 61440 %% that's our "baseline" for the purposes of this explanation %% - %% Note: in these tests teh poa1 difficulty modifier is set to 2, which changes the + %% Note: in these tests teh poa1 difficulty modifier is set to 2, which changes the %% number of solutions per chunk. %% %% AllOneChunkBaseline: 0.5x baseline @@ -150,7 +150,7 @@ test_v2_price_with_poa1_diff_multiplier() -> %% => 3 per partition %% MixedChunkBaseline: 0.5x baseline %% - 2 1-chunk blocks, 1 2-chunk blocks - %% => 3/4 solutions per chunk + %% => 3/4 solutions per chunk %% => 1.5 per partition %% => Since we deal in integers, that gets rounded to 1 per partition do_price_per_gib_minute_post_transition(AtTransitionEnd, 30720, 92160, 30720), @@ -159,7 +159,7 @@ test_v2_price_with_poa1_diff_multiplier() -> do_price_per_gib_minute_post_transition(Height, AllOneChunkBaseline, AllTwoChunkBaseline, MixedChunkBaseline) -> - + PoA1DiffMultiplier = ar_difficulty:poa1_diff_multiplier(Height), B0 = #block{ reward_history = reward_history(1, 1), block_time_history = block_time_history(1, 1) }, @@ -653,7 +653,7 @@ assert_new_account_fee() -> %% @doc Return the current balance of the given account. get_balance(Pub) -> Address = ar_wallet:to_address(Pub), - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ method => get, @@ -678,7 +678,7 @@ get_balance(Pub) -> get_reserved_balance(Address) -> - Peer = ar_test_node:peer_ip(main), + Peer = ar_test_node:peer_addr(main), {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ method => get, diff --git a/apps/arweave/test/ar_serialize_tests.erl b/apps/arweave/test/ar_serialize_tests.erl index 2d9d7071c..e5f011756 100644 --- a/apps/arweave/test/ar_serialize_tests.erl +++ b/apps/arweave/test/ar_serialize_tests.erl @@ -309,7 +309,7 @@ candidate_to_json_struct_test() -> cache_ref = {rand:uniform(100), rand:uniform(100), rand:uniform(100), make_ref()}, chunk1 = crypto:strong_rand_bytes(256 * 1024), chunk2 = crypto:strong_rand_bytes(256 * 1024), - cm_lead_peer = ar_test_node:peer_ip(main)}). + cm_lead_peer = ar_test_node:peer_addr(main)}). 
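The price-per-GiB-minute constants asserted in the ar_pricing_tests.erl hunks above follow the proportion spelled out in the comments: with 2 chunks per partition in tests, 2 solutions per partition correspond to the 61440 baseline, and the expected price scales with the integer number of solutions found per partition. A back-of-the-envelope check of those constants under that assumption (this is not the node's pricing code):

expected_price_per_gib_minute(SolutionsPerPartition) ->
	Baseline = 61440,	%% 2 solutions per partition in the test setup.
	Baseline * SolutionsPerPartition div 2.

%% Without the poa1 difficulty multiplier: 2, 4 and 3 solutions per partition
%% give 61440, 122880 and 92160. With the multiplier of 2 used above: 1, 3 and
%% 1 (1.5 truncated) solutions per partition give 30720, 92160 and 30720.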
solution_to_json_struct_test() -> diff --git a/apps/arweave/test/ar_start_from_block_tests.erl b/apps/arweave/test/ar_start_from_block_tests.erl index 7b88a5664..7034414ac 100644 --- a/apps/arweave/test/ar_start_from_block_tests.erl +++ b/apps/arweave/test/ar_start_from_block_tests.erl @@ -16,7 +16,7 @@ test_start_from_block() -> ar_test_node:start_peer(peer2, B0), ar_test_node:connect_to_peer(peer1), ar_test_node:connect_to_peer(peer2), - + %% Mine a few blocks, shared by both peers ar_test_node:mine(peer1), ar_test_node:wait_until_height(peer1, 1), @@ -148,7 +148,7 @@ get_block_index(Peer) -> ar_test_node:remote_call(Peer, ar_node, get_blocks, []). get_reward_history(Peer, H) -> - PeerIP = ar_test_node:peer_ip(Peer), + PeerIP = ar_test_node:peer_addr(Peer), case ar_http:req(#{ peer => PeerIP, method => get, diff --git a/apps/arweave/test/ar_test_data_sync.erl b/apps/arweave/test/ar_test_data_sync.erl index 5c29618d6..0da52f5f7 100644 --- a/apps/arweave/test/ar_test_data_sync.erl +++ b/apps/arweave/test/ar_test_data_sync.erl @@ -31,14 +31,14 @@ setup_nodes2(#{ addr := MainAddr, peer_addr := PeerAddr } = Options) -> Wallet = {_, Pub} = ar_wallet:new(), [B0] = ar_weave:init([{ar_wallet:to_address(Pub), ?AR(200000), <<>>}]), {ok, Config} = application:get_env(arweave, config), + {ok, PeerConfig} = ar_test_node:remote_call(peer1, application, get_env, [arweave, config]), case maps:get(storage_modules, Options, not_found) of not_found -> - ar_test_node:start(B0, MainAddr, Config); + ar_test_node:start(B0, MainAddr, Config#config{local_peers = [{127,0,0,1, PeerConfig#config.port}]}); StorageModules -> - ar_test_node:start(B0, MainAddr, Config, StorageModules) + ar_test_node:start(B0, MainAddr, Config#config{local_peers = [{127,0,0,1, PeerConfig#config.port}]}, StorageModules) end, - {ok, PeerConfig} = ar_test_node:remote_call(peer1, application, get_env, [arweave, config]), - ar_test_node:start_peer(peer1, B0, PeerAddr, PeerConfig), + ar_test_node:start_peer(peer1, B0, PeerAddr, PeerConfig#config{local_peers = [{127,0,0,1, Config#config.port}]}), ar_test_node:connect_to_peer(peer1), Wallet. @@ -235,7 +235,7 @@ build_proofs(TX, Chunks, TXs, BlockStartOffset, Height) -> ). get_tx_offset(Node, TXID) -> - Peer = ar_test_node:peer_ip(Node), + Peer = ar_test_node:peer_addr(Node), ar_http:req(#{ method => get, peer => Peer, diff --git a/apps/arweave/test/ar_test_node.erl b/apps/arweave/test/ar_test_node.erl index 86c63e9d0..5e4b7cf92 100644 --- a/apps/arweave/test/ar_test_node.erl +++ b/apps/arweave/test/ar_test_node.erl @@ -13,7 +13,7 @@ stop/0, stop/1, start_peer/2, start_peer/3, start_peer/4, peer_name/1, peer_port/1, stop_peers/0, stop_peer/1, connect_to_peer/1, disconnect_from/1, join/2, join_on/1, rejoin_on/1, - peer_ip/1, get_node_namespace/0, get_unused_port/0, + peer_addr/1, get_node_namespace/0, get_unused_port/0, mine/0, get_tx_anchor/1, get_tx_confirmations/2, get_tx_price/2, get_tx_price/3, get_optimistic_tx_price/2, get_optimistic_tx_price/3, @@ -125,7 +125,7 @@ stop_peer(Node) -> ok end. -peer_ip(Node) -> +peer_addr(Node) -> {127, 0, 0, 1, peer_port(Node)}. wait_until_joined(Node) -> @@ -191,15 +191,15 @@ start_coordinated(MiningNodeCount) when MiningNodeCount >= 1, MiningNodeCount =< %% Set weave larger than what we'll cover with the 3 nodes so that every node can find %% a solution. 
[B0] = ar_weave:init([], get_difficulty_for_invalid_hash(), ?PARTITION_SIZE * 5), - ExitPeer = peer_ip(peer1), - ValidatorPeer = peer_ip(main), + ExitPeer = peer_addr(peer1), + ValidatorPeer = peer_addr(main), MinerNodes = lists:sublist([peer2, peer3, peer4], MiningNodeCount), BaseCMConfig = base_cm_config([ValidatorPeer]), RewardAddr = BaseCMConfig#config.mining_addr, ExitNodeConfig = BaseCMConfig#config{ mine = true, - local_peers = [peer_ip(Peer) || Peer <- MinerNodes] + local_peers = [peer_addr(Peer) || Peer <- MinerNodes] }, ValidatorNodeConfig = BaseCMConfig#config{ mine = false, @@ -210,12 +210,12 @@ start_coordinated(MiningNodeCount) when MiningNodeCount >= 1, MiningNodeCount =< remote_call(peer1, ar_test_node, start_node, [B0, ExitNodeConfig]), %% exit node remote_call(main, ar_test_node, start_node, [B0, ValidatorNodeConfig]), %% validator node - + lists:foreach( fun(I) -> MinerNode = lists:nth(I, MinerNodes), MinerPeers = lists:filter(fun(Peer) -> Peer /= MinerNode end, MinerNodes), - MinerPeerIPs = [peer_ip(Peer) || Peer <- MinerPeers], + MinerPeerIPs = [peer_addr(Peer) || Peer <- MinerPeers], MinerConfig = BaseCMConfig#config{ cm_exit_peer = ExitPeer, @@ -480,7 +480,6 @@ start(B0, RewardAddr, Config, StorageModules) -> peers = [], cm_exit_peer = not_set, cm_peers = [], - local_peers = [], mining_addr = RewardAddr, storage_modules = StorageModules, disk_space_check_frequency = 1000, @@ -532,7 +531,7 @@ get_tx_price(Node, DataSize) -> %% @doc Fetch the fee estimation and the denomination (call GET /price2/[size]/[addr]) %% from the given node. get_tx_price(Node, DataSize, Target) -> - Peer = peer_ip(Node), + Peer = peer_addr(Node), Path = "/price/" ++ integer_to_list(DataSize) ++ "/" ++ binary_to_list(ar_util:encode(Target)), {ok, {{<<"200">>, _}, _, Reply, _, _}} = @@ -570,7 +569,7 @@ get_optimistic_tx_price(Node, DataSize, Target) -> {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ method => get, - peer => peer_ip(Node), + peer => peer_addr(Node), path => Path }), binary_to_integer(maps:get(<<"fee">>, jiffy:decode(Reply, [return_maps]))). @@ -674,7 +673,7 @@ join_on(#{ node := Node, join_on := JoinOnNode }, Rejoin) -> remote_call(Node, ar_test_node, join, [JoinOnNode, Rejoin], 20000). join(JoinOnNode, Rejoin) -> - Peer = peer_ip(JoinOnNode), + Peer = peer_addr(JoinOnNode), {ok, Config} = application:get_env(arweave, config), case Rejoin of true -> @@ -714,7 +713,7 @@ connect_to_peer(Node) -> %% Unblock connections possibly blocked in the prior test code. ar_http:unblock_peer_connections(), remote_call(Node, ar_http, unblock_peer_connections, []), - Peer = peer_ip(Node), + Peer = peer_addr(Node), Self = self_node(), %% Make requests to the nodes to make them discover each other. {ok, {{<<"200">>, <<"OK">>}, _, _, _, _}} = @@ -727,7 +726,7 @@ connect_to_peer(Node) -> true = ar_util:do_until( fun() -> Peers = remote_call(Node, ar_peers, get_peers, [lifetime]), - lists:member(peer_ip(Self), Peers) + lists:member(peer_addr(Self), Peers) end, 200, 5000 @@ -735,7 +734,7 @@ connect_to_peer(Node) -> {ok, {{<<"200">>, <<"OK">>}, _, _, _, _}} = ar_http:req(#{ method => get, - peer => peer_ip(Self), + peer => peer_addr(Self), path => "/info", headers => p2p_headers(Node) }), @@ -904,7 +903,7 @@ post_tx_to_peer(Node, TX, Wait) -> post_tx_json(Node, JSON) -> ar_http:req(#{ method => post, - peer => peer_ip(Node), + peer => peer_addr(Node), path => "/tx", body => JSON }). 
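setup_nodes2/1 and start_coordinated/1 above now populate #config.local_peers with {127, 0, 0, 1, Port} tuples so that each test node lists its counterpart as a local peer. Condensed, the symmetric wiring done in setup_nodes2/1 amounts to the following sketch (Config, PeerConfig, B0, MainAddr and PeerAddr are the values already bound there; the storage-modules branch is omitted):

%% Main lists peer1 as a local peer, and vice versa.
MainConfig = Config#config{ local_peers = [{127, 0, 0, 1, PeerConfig#config.port}] },
Peer1Config = PeerConfig#config{ local_peers = [{127, 0, 0, 1, Config#config.port}] },
ar_test_node:start(B0, MainAddr, MainConfig),
ar_test_node:start_peer(peer1, B0, PeerAddr, Peer1Config),
ar_test_node:connect_to_peer(peer1).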
@@ -913,7 +912,7 @@ get_tx_anchor(Node) -> {ok, {{<<"200">>, _}, _, Reply, _, _}} = ar_http:req(#{ method => get, - peer => peer_ip(Node), + peer => peer_addr(Node), path => "/tx_anchor" }), ar_util:decode(Reply). @@ -922,7 +921,7 @@ get_tx_confirmations(Node, TXID) -> Response = ar_http:req(#{ method => get, - peer => peer_ip(Node), + peer => peer_addr(Node), path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/status" }), case Response of @@ -997,9 +996,9 @@ post_and_mine(#{ miner := Node, await_on := AwaitOnNode }, TXs) -> remote_call(AwaitOnNode, ar_test_node, read_block_when_stored, [H, true], 20000). post_block(B, ExpectedResult) when not is_list(ExpectedResult) -> - post_block(B, [ExpectedResult], peer_ip(main)); + post_block(B, [ExpectedResult], peer_addr(main)); post_block(B, ExpectedResults) -> - post_block(B, ExpectedResults, peer_ip(main)). + post_block(B, ExpectedResults, peer_addr(main)). post_block(B, ExpectedResults, Peer) -> ?assertMatch({ok, {{<<"200">>, _}, _, _, _, _}}, send_new_block(Peer, B)), @@ -1010,7 +1009,7 @@ send_new_block(Peer, B) -> ar_serialize:block_to_binary(B)). await_post_block(B, ExpectedResults) -> - await_post_block(B, ExpectedResults, peer_ip(main)). + await_post_block(B, ExpectedResults, peer_addr(main)). await_post_block(#block{ indep_hash = H } = B, ExpectedResults, Peer) -> PostGossipFailureCodes = [invalid_denomination, @@ -1105,7 +1104,7 @@ read_block_when_stored(H, IncludeTXs) -> get_chunk(Node, Offset) -> ar_http:req(#{ method => get, - peer => peer_ip(Node), + peer => peer_addr(Node), path => "/chunk/" ++ integer_to_list(Offset), headers => [{<<"x-bucket-based-offset">>, <<"true">>}] }). @@ -1113,13 +1112,13 @@ get_chunk(Node, Offset) -> get_chunk_proof(Node, Offset) -> ar_http:req(#{ method => get, - peer => peer_ip(Node), + peer => peer_addr(Node), path => "/chunk_proof/" ++ integer_to_list(Offset), headers => [{<<"x-bucket-based-offset">>, <<"true">>}] }). post_chunk(Node, Proof) -> - Peer = peer_ip(Node), + Peer = peer_addr(Node), ar_http:req(#{ method => post, peer => Peer, @@ -1133,7 +1132,7 @@ random_v1_data(Size) -> assert_get_tx_data(Node, TXID, ExpectedData) -> ?debugFmt("Polling for data of ~s.", [ar_util:encode(TXID)]), - Peer = peer_ip(Node), + Peer = peer_addr(Node), true = ar_util:do_until( fun() -> case ar_http:req(#{ method => get, peer => Peer, @@ -1188,7 +1187,7 @@ get_tx_data_in_chunks_traverse_forward(Offset, Start, Peer, Bin) -> [Chunk | Bin]). assert_data_not_found(Node, TXID) -> - Peer = peer_ip(Node), + Peer = peer_addr(Node), ?assertMatch({ok, {{<<"404">>, _}, _, _Binary, _, _}}, ar_http:req(#{ method => get, peer => Peer, path => "/tx/" ++ binary_to_list(ar_util:encode(TXID)) ++ "/data" })). diff --git a/apps/arweave/test/ar_vdf_server_tests.erl b/apps/arweave/test/ar_vdf_server_tests.erl index eaf0198bb..c94fa560d 100644 --- a/apps/arweave/test/ar_vdf_server_tests.erl +++ b/apps/arweave/test/ar_vdf_server_tests.erl @@ -38,7 +38,7 @@ setup_external_update() -> %% auto-computed VDF steps getting in the way. 
ar_test_node:start( B0, ar_wallet:to_address(ar_wallet:new_keyfile()), - Config#config{ nonce_limiter_server_trusted_peers = [ + Config#config{ nonce_limiter_server_trusted_peers = [ ar_util:format_peer(vdf_server_1()), ar_util:format_peer(vdf_server_2()) ], mine = true}), @@ -284,20 +284,20 @@ test_vdf_client_fast_block() -> %% Restart peer1 as a VDF client {ok, PeerConfig} = ar_test_node:remote_call(peer1, application, get_env, [arweave, config]), - ar_test_node:start_peer(peer1, + ar_test_node:start_peer(peer1, B0, PeerAddress, - PeerConfig#config{ nonce_limiter_server_trusted_peers = [ - ar_util:format_peer(ar_test_node:peer_ip(main)) ] }), + PeerConfig#config{ nonce_limiter_server_trusted_peers = [ + ar_util:format_peer(ar_test_node:peer_addr(main)) ] }), %% Start main as a VDF server ar_test_node:start( B0, ar_wallet:to_address(ar_wallet:new_keyfile()), - Config#config{ nonce_limiter_client_peers = [ - ar_util:format_peer(ar_test_node:peer_ip(peer1)) ]}), + Config#config{ nonce_limiter_client_peers = [ + ar_util:format_peer(ar_test_node:peer_addr(peer1)) ]}), ar_test_node:connect_to_peer(peer1), %% Post the block to the VDF client. It won't be able to validate it since the VDF server %% isn't aware of the new VDF session yet. - send_new_block(ar_test_node:peer_ip(peer1), B1), + send_new_block(ar_test_node:peer_addr(peer1), B1), timer:sleep(10000), ?assertEqual(1, length(ar_test_node:remote_call(peer1, ar_node, get_blocks, [])), @@ -306,7 +306,7 @@ test_vdf_client_fast_block() -> %% After the VDF server receives the block, it should push the old and new VDF sessions %% to the VDF client allowing it to validate teh block. - send_new_block(ar_test_node:peer_ip(main), B1), + send_new_block(ar_test_node:peer_addr(main), B1), %% If all is right, the VDF server should push the old and new VDF sessions allowing %% the VDF clietn to finally validate the block. BI = assert_wait_until_height(peer1, 1). @@ -331,7 +331,7 @@ test_vdf_client_fast_block_pull_interface() -> %% Restart peer1 as a VDF client {ok, PeerConfig} = ar_test_node:remote_call(peer1, application, get_env, [arweave, config]), - ar_test_node:start_peer(peer1, + ar_test_node:start_peer(peer1, B0, PeerAddress, PeerConfig#config{ nonce_limiter_server_trusted_peers = [ "127.0.0.1:" ++ integer_to_list(Config#config.port) ], enable = [vdf_server_pull | PeerConfig#config.enable] }), @@ -343,7 +343,7 @@ test_vdf_client_fast_block_pull_interface() -> %% Post the block to the VDF client. It won't be able to validate it since the VDF server %% isn't aware of the new VDF session yet. - send_new_block(ar_test_node:peer_ip(peer1), B1), + send_new_block(ar_test_node:peer_addr(peer1), B1), timer:sleep(10000), ?assertEqual(1, length(ar_test_node:remote_call(peer1, ar_node, get_blocks, [])), @@ -352,7 +352,7 @@ test_vdf_client_fast_block_pull_interface() -> %% After the VDF server receives the block, it should push the old and new VDF sessions %% to the VDF client allowing it to validate teh block. - send_new_block(ar_test_node:peer_ip(main), B1), + send_new_block(ar_test_node:peer_addr(main), B1), %% If all is right, the VDF server should push the old and new VDF sessions allowing %% the VDF clietn to finally validate the block. BI = assert_wait_until_height(peer1, 1). 
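The fast-block tests above wire the two nodes in both directions: the VDF client (peer1) names the server under nonce_limiter_server_trusted_peers, and the VDF server (main) names the client under nonce_limiter_client_peers, in each case as the "IP:Port" string produced by ar_util:format_peer/1 (the pull-interface variant writes the same string by hand). Condensed, the pairing looks like this sketch (Config, PeerConfig, B0 and PeerAddress are the values already bound in the tests above):

ClientConfig = PeerConfig#config{ nonce_limiter_server_trusted_peers =
		[ar_util:format_peer(ar_test_node:peer_addr(main))] },
ServerConfig = Config#config{ nonce_limiter_client_peers =
		[ar_util:format_peer(ar_test_node:peer_addr(peer1))] },
ar_test_node:start_peer(peer1, B0, PeerAddress, ClientConfig),
ar_test_node:start(B0, ar_wallet:to_address(ar_wallet:new_keyfile()), ServerConfig),
ar_test_node:connect_to_peer(peer1).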
@@ -376,7 +376,7 @@ test_vdf_client_slow_block() -> %% Restart peer1 as a VDF client {ok, PeerConfig} = ar_test_node:remote_call(peer1, application, get_env, [arweave, config]), - ar_test_node:start_peer(peer1, + ar_test_node:start_peer(peer1, B0, PeerAddress, PeerConfig#config{ nonce_limiter_server_trusted_peers = [ "127.0.0.1:" ++ integer_to_list(Config#config.port) @@ -392,7 +392,7 @@ test_vdf_client_slow_block() -> %% Post the block to the VDF client, it should validate it "immediately" since the %% VDF server is ahead of the block in the VDF chain. - send_new_block(ar_test_node:peer_ip(peer1), B1), + send_new_block(ar_test_node:peer_addr(peer1), B1), BI = assert_wait_until_height(peer1, 1). test_vdf_client_slow_block_pull_interface() -> @@ -414,7 +414,7 @@ test_vdf_client_slow_block_pull_interface() -> %% Restart peer1 as a VDF client {ok, PeerConfig} = ar_test_node:remote_call(peer1, application, get_env, [arweave, config]), - ar_test_node:start_peer(peer1, + ar_test_node:start_peer(peer1, B0, PeerAddress, PeerConfig#config{ nonce_limiter_server_trusted_peers = [ "127.0.0.1:" ++ integer_to_list(Config#config.port) ], @@ -431,7 +431,7 @@ test_vdf_client_slow_block_pull_interface() -> %% Post the block to the VDF client, it should validate it "immediately" since the %% VDF server is ahead of the block in the VDF chain. - send_new_block(ar_test_node:peer_ip(peer1), B1), + send_new_block(ar_test_node:peer_addr(peer1), B1), BI = assert_wait_until_height(peer1, 1). %% @@ -702,7 +702,7 @@ test_2_servers_backtrack() -> [20, 20, 20, 20, 20, 20, 20, 20, 10, 10, 10, 10, 10, 20, 30], computed_upper_bounds()). -test_mining_session() -> +test_mining_session() -> SessionKey0 = get_current_session_key(), SessionKey1 = {<<"session1">>, 1, 1}, SessionKey2 = {<<"session2">>, 2, 1}, @@ -973,8 +973,8 @@ get_current_session_key() -> mock_add_task() -> { - ar_mining_worker, add_task, - fun(Worker, TaskType, Candidate) -> + ar_mining_worker, add_task, + fun(Worker, TaskType, Candidate) -> ets:insert(add_task, {Worker, TaskType, Candidate#mining_candidate.step_number}) end }.
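The mock above records every add_task call as a {Worker, TaskType, StepNumber} row in the add_task ets table. A small sketch of how a test could read those rows back for assertions, assuming the table is created elsewhere in the test setup (its creation is not part of this diff):

recorded_step_numbers(Worker) ->
	[StepNumber || {W, _TaskType, StepNumber} <- ets:tab2list(add_task),
			W =:= Worker].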