From d44cd4ffda7af2ec1823bc6989ee47db2216b4de Mon Sep 17 00:00:00 2001 From: Kristof Hetzl Date: Mon, 15 Dec 2025 19:08:06 +0000 Subject: [PATCH 1/7] Server-side rate limiting restructured into a separate application. --- .github/workflows/on-demand.yml | 2 + .github/workflows/x-test-full.yml | 2 + .github/workflows/x-test-vdf.yml | 2 + .gitignore | 1 + apps/arweave/src/ar_blacklist_middleware.erl | 99 +-- apps/arweave/src/ar_config.erl | 791 +++++++++++++++++- apps/arweave/src/ar_http_iface_middleware.erl | 4 +- .../ar_http_iface_rate_limiter_middleware.erl | 90 ++ apps/arweave/src/ar_http_iface_server.erl | 2 +- apps/arweave/test/ar_http_iface_tests.erl | 22 +- apps/arweave/test/ar_test_node.erl | 6 + .../arweave_config/include/arweave_config.hrl | 300 ++++++- apps/arweave_limiter/include/.gitkeep | 0 apps/arweave_limiter/priv/.gitkeep | 0 .../src/arweave_limiter.app.src | 30 + apps/arweave_limiter/src/arweave_limiter.erl | 86 ++ .../src/arweave_limiter_group.erl | 379 +++++++++ .../src/arweave_limiter_metrics.erl | 64 ++ .../src/arweave_limiter_metrics_collector.erl | 85 ++ .../src/arweave_limiter_sup.erl | 196 +++++ .../src/arweave_limiter_time.erl | 17 + .../test/arweave_limiter_group_tests.erl | 609 ++++++++++++++ ...rweave_limiter_metrics_collector_tests.erl | 118 +++ bin/arweave | 16 +- rebar.config | 4 +- 25 files changed, 2819 insertions(+), 106 deletions(-) create mode 100644 apps/arweave/src/ar_http_iface_rate_limiter_middleware.erl create mode 100644 apps/arweave_limiter/include/.gitkeep create mode 100644 apps/arweave_limiter/priv/.gitkeep create mode 100644 apps/arweave_limiter/src/arweave_limiter.app.src create mode 100644 apps/arweave_limiter/src/arweave_limiter.erl create mode 100644 apps/arweave_limiter/src/arweave_limiter_group.erl create mode 100644 apps/arweave_limiter/src/arweave_limiter_metrics.erl create mode 100644 apps/arweave_limiter/src/arweave_limiter_metrics_collector.erl create mode 100644 apps/arweave_limiter/src/arweave_limiter_sup.erl create mode 100644 apps/arweave_limiter/src/arweave_limiter_time.erl create mode 100644 apps/arweave_limiter/test/arweave_limiter_group_tests.erl create mode 100644 apps/arweave_limiter/test/arweave_limiter_metrics_collector_tests.erl diff --git a/.github/workflows/on-demand.yml b/.github/workflows/on-demand.yml index bc67f43dcb..a58f270c10 100644 --- a/.github/workflows/on-demand.yml +++ b/.github/workflows/on-demand.yml @@ -46,6 +46,8 @@ on: - ar_intervals - ar_join - ar_kv + - arweave_limiter_group + - arweave_limiter_metrics_collector - ar_mempool_tests - ar_merkle - ar_mine_randomx_tests diff --git a/.github/workflows/x-test-full.yml b/.github/workflows/x-test-full.yml index a373132bc0..1ac45db630 100644 --- a/.github/workflows/x-test-full.yml +++ b/.github/workflows/x-test-full.yml @@ -61,6 +61,8 @@ jobs: ar_intervals, ar_join, ar_kv, + arweave_limiter_group, + arweave_limiter_metrics_collector, ar_merkle, ar_mining_cache, ar_mining_server, diff --git a/.github/workflows/x-test-vdf.yml b/.github/workflows/x-test-vdf.yml index fb4ece9e9d..90f07d4802 100644 --- a/.github/workflows/x-test-vdf.yml +++ b/.github/workflows/x-test-vdf.yml @@ -59,6 +59,8 @@ jobs: ar_intervals, ar_join, ar_kv, + arweave_limiter_group, + arweave_limiter_metrics_collector, ar_merkle, ar_node, ar_node_utils, diff --git a/.gitignore b/.gitignore index b3451e3dac..07118ce47c 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,4 @@ node_modules screenlog.0 _* *.swp +*~ diff --git a/apps/arweave/src/ar_blacklist_middleware.erl 
b/apps/arweave/src/ar_blacklist_middleware.erl index 4f4c22a9e1..c7c6abf2a5 100644 --- a/apps/arweave/src/ar_blacklist_middleware.erl +++ b/apps/arweave/src/ar_blacklist_middleware.erl @@ -1,35 +1,17 @@ -module(ar_blacklist_middleware). --behaviour(cowboy_middleware). - --export([start/0, execute/2, reset/0, reset_rate_limit/3, - ban_peer/2, is_peer_banned/1, cleanup_ban/1, decrement_ip_addr/2]). +-export([start/0, ban_peer/2, is_peer_banned/1, cleanup_ban/1]). -export([start_link/0]). +-ifdef(AR_TEST). +-export([reset/0]). +-endif. + -include_lib("arweave/include/ar.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). -include_lib("arweave/include/ar_blacklist_middleware.hrl"). -include_lib("eunit/include/eunit.hrl"). -execute(Req, Env) -> - IPAddr = requesting_ip_addr(Req), - {ok, Config} = arweave_config:get_env(), - case lists:member(blacklist, Config#config.disable) of - true -> - {ok, Req, Env}; - _ -> - LocalIPs = [peer_to_ip_addr(Peer) || Peer <- Config#config.local_peers], - case lists:member(IPAddr, LocalIPs) of - true -> - {ok, Req, Env}; - false -> - case increment_ip_addr(IPAddr, Req) of - {block, Limit} -> {stop, blacklisted(Limit, Req)}; - pass -> {ok, Req, Env} - end - end - end. - start_link() -> {ok, spawn_link(fun() -> start() end)}. @@ -43,6 +25,10 @@ start() -> #{ skip_on_shutdown => false } ). +reset() -> + true = ets:delete_all_objects(?MODULE), + ok. + %% Ban a peer completely for TTLSeconds seconds. Since we cannot trust the port, %% we ban the whole IP address. ban_peer(Peer, TTLSeconds) -> @@ -83,71 +69,4 @@ cleanup_ban(TableID) -> end. %% Private functions -blacklisted(Limit, Req) -> - cowboy_req:reply( - 429, - #{ - <<"connection">> => <<"close">>, - <<"retry-after">> => integer_to_binary(?THROTTLE_PERIOD div 1000), - <<"x-rate-limit-limit">> => integer_to_binary(Limit) - }, - <<"Too Many Requests">>, - Req - ). - -reset() -> - true = ets:delete_all_objects(?MODULE), - ok. - -reset_rate_limit(TableID, IPAddr, Path) -> - case ets:whereis(?MODULE) of - TableID -> - ets:delete(?MODULE, {rate_limit, IPAddr, Path}); - _ -> - table_owner_died - end. - -increment_ip_addr(IPAddr, Req) -> - case ets:whereis(?MODULE) of - undefined -> pass; - _ -> update_ip_addr(IPAddr, Req, 1) - end. - -decrement_ip_addr(IPAddr, Req) -> - case ets:whereis(?MODULE) of - undefined -> pass; - _ -> update_ip_addr(IPAddr, Req, -1) - end. - -update_ip_addr(IPAddr, Req, Delta) -> - {PathKey, Limit} = get_key_limit(IPAddr, Req), - %% Divide by 2 as the throttle period is 30 seconds. - RequestLimit = Limit div 2, - Key = {rate_limit, IPAddr, PathKey}, - case ets:update_counter(?MODULE, Key, {2, Delta}, {Key, 0}) of - 1 -> - _ = ar_timer:apply_after( - ?THROTTLE_PERIOD, - ?MODULE, - reset_rate_limit, - [ets:whereis(?MODULE), IPAddr, PathKey], - #{ skip_on_shutdown => true } - ), - pass; - Count when Count =< RequestLimit -> - pass; - _ -> - {block, Limit} - end. - -requesting_ip_addr(Req) -> - {IPAddr, _} = cowboy_req:peer(Req), - IPAddr. - peer_to_ip_addr({A, B, C, D, _}) -> {A, B, C, D}. - -get_key_limit(IPAddr, Req) -> - Path = ar_http_iface_server:split_path(cowboy_req:path(Req)), - {ok, Config} = arweave_config:get_env(), - Map = maps:get(IPAddr, Config#config.requests_per_minute_limit_by_ip, #{}), - ?RPM_BY_PATH(Path, Map)().
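[Reviewer note] The per-IP request counting removed above is superseded by the new arweave_limiter application introduced later in this patch. A minimal sketch of the replacement call pattern, using only APIs visible in this patch (the wrapper function check_peer/1 is hypothetical):

    %% Hypothetical caller; `general` and the IP-tuple key come from this patch.
    check_peer(Req) ->
        {{A, B, C, D}, _Port} = cowboy_req:peer(Req),
        PeerKey = {A, B, C, D},
        case arweave_limiter:register_or_reject_call(general, PeerKey) of
            {reject, Reason, Data} ->
                %% Over a configured limit; the new middleware replies 429 here.
                {reject, Reason, Data};
            _ ->
                %% Accepted. A handler may later refund the charge, e.g. for
                %% valid transactions:
                %% arweave_limiter:reduce_for_peer(general, PeerKey),
                ok
        end.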
diff --git a/apps/arweave/src/ar_config.erl b/apps/arweave/src/ar_config.erl index 1951b0373f..aeeb979354 100644 --- a/apps/arweave/src/ar_config.erl +++ b/apps/arweave/src/ar_config.erl @@ -899,6 +899,796 @@ parse_options([{<<"http_api.tcp.send_timeout">>, Timeout}|Rest], Config) -> {error, {bad_value, 'http_api.tcp.send_timeout'}, Timeout} end; +%% RATE LIMITER GENERAL +parse_options([{<<"http_api.limiter.general.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.general.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.general.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.general.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.general.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.general.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.general.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.general.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.general.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.general.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.general.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.general.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.general.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.general.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.general.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.general.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.general.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.general.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.general.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.general.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.general.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.general.concurrency_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.general.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + 
parse_options(Rest, Config#config{'http_api.limiter.general.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.general.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER CHUNK +parse_options([{<<"http_api.limiter.chunk.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.chunk.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.chunk.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.chunk.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.chunk.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.chunk.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.chunk.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.chunk.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.chunk.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.chunk.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.chunk.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.chunk.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.concurrency_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.chunk.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.chunk.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.chunk.is_manual_reduction_disabled'}, 
IsDisabled} + end; + +%% RATE LIMITER DATA_SYNC_RECORD +parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.data_sync_record.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.concurrency_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.data_sync_record.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.is_manual_reduction_disabled' = 
IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.data_sync_record.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER RECENT_HASH_LIST_DIFF +parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.recent_hash_list_diff.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.recent_hash_list_diff.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.recent_hash_list_diff.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.recent_hash_list_diff.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.recent_hash_list_diff.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.concurrency_limit'}, Limit} + end; + 
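+%% Illustration (not part of this patch): each limiter group is configured
+%% through JSON keys of the shape parsed above. For example, assuming the
+%% usual JSON configuration file, the chunk group could be tuned as:
+%%
+%%   {
+%%     "http_api.limiter.chunk.sliding_window_limit": 100,
+%%     "http_api.limiter.chunk.sliding_window_duration": 1000,
+%%     "http_api.limiter.chunk.leaky_limit": 100,
+%%     "http_api.limiter.chunk.leaky_tick_interval": 1000,
+%%     "http_api.limiter.chunk.leaky_tick_reduction": 30,
+%%     "http_api.limiter.chunk.concurrency_limit": 200,
+%%     "http_api.limiter.chunk.is_manual_reduction_disabled": false
+%%   }
+%%
+%% The same set of keys exists for every group (general, chunk,
+%% data_sync_record, recent_hash_list_diff, block_index, wallet_list,
+%% get_vdf, get_vdf_session, get_previous_vdf_session, metrics).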
+parse_options([{<<"http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER BLOCK_INDEX +parse_options([{<<"http_api.limiter.block_index.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.block_index.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.block_index.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.block_index.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.block_index.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.block_index.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.block_index.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.block_index.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.block_index.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.block_index.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.block_index.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.block_index.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.concurrency_limit'}, Limit} + end; + 
+parse_options([{<<"http_api.limiter.block_index.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.block_index.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.block_index.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER WALLET_LIST +parse_options([{<<"http_api.limiter.wallet_list.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.wallet_list.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.wallet_list.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.wallet_list.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.wallet_list.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.wallet_list.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.wallet_list.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.wallet_list.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.wallet_list.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.wallet_list.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.concurrency_limit'}, Limit} + end; + 
+parse_options([{<<"http_api.limiter.wallet_list.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.wallet_list.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.wallet_list.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER GET_VDF +parse_options([{<<"http_api.limiter.get_vdf.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_vdf.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_vdf.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_vdf.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.get_vdf.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.concurrency_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_vdf.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + 
IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER GET_VDF_SESSION +parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf_session.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_vdf_session.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_vdf_session.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_vdf_session.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.get_vdf_session.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.concurrency_limit'}, Limit} + end; + 
+parse_options([{<<"http_api.limiter.get_vdf_session.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.get_vdf_session.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER GET_PREVIOUS_VDF_SESSION +parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.get_previous_vdf_session.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.concurrency_limit">>, 
Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.concurrency_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled'}, IsDisabled} + end; + +%% RATE LIMITER METRICS +parse_options([{<<"http_api.limiter.metrics.sliding_window_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.metrics.sliding_window_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.sliding_window_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.metrics.sliding_window_duration">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.metrics.sliding_window_duration' = Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.sliding_window_duration'}, Duration} + end; + +parse_options([{<<"http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry'}, Duration} + end; + +parse_options([{<<"http_api.limiter.metrics.leaky_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.metrics.leaky_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.leaky_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.metrics.leaky_tick_interval">>, Duration}|Rest], Config) -> + case Duration of + Duration when is_integer(Duration), Duration > 0 -> + parse_options( + Rest, Config#config{'http_api.limiter.metrics.leaky_tick_interval' = + Duration }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.leaky_tick_interval'}, Duration} + end; + +parse_options([{<<"http_api.limiter.metrics.leaky_tick_reduction">>, Reduction}|Rest], Config) -> + case Reduction of + Reduction when is_integer(Reduction), Reduction > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.metrics.leaky_tick_reduction' = Reduction }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.leaky_tick_reduction'}, Reduction} + end; + +parse_options([{<<"http_api.limiter.metrics.concurrency_limit">>, Limit}|Rest], Config) -> + case Limit of + Limit when 
is_integer(Limit), Limit > 0 -> + parse_options(Rest, Config#config{'http_api.limiter.metrics.concurrency_limit' = Limit }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.concurrency_limit'}, Limit} + end; + +parse_options([{<<"http_api.limiter.metrics.is_manual_reduction_disabled">>, IsDisabled}|Rest], Config) -> + case IsDisabled of + IsDisabled when is_boolean(IsDisabled) -> + parse_options(Rest, Config#config{'http_api.limiter.metrics.is_manual_reduction_disabled' = IsDisabled }); + _ -> + {error, {bad_value, 'http_api.limiter.metrics.is_manual_reduction_disabled'}, IsDisabled} + end; + parse_options([Opt | _], _) -> {error, unknown, Opt}; parse_options([], Config) -> @@ -1262,4 +2052,3 @@ set_verify_flags(Config) -> max_propagation_peers = 0, max_block_propagation_peers = 0 }. - diff --git a/apps/arweave/src/ar_http_iface_middleware.erl b/apps/arweave/src/ar_http_iface_middleware.erl index dbdfe4d594..b6dab680ae 100644 --- a/apps/arweave/src/ar_http_iface_middleware.erl +++ b/apps/arweave/src/ar_http_iface_middleware.erl @@ -2138,8 +2138,8 @@ handle_post_tx_accepted(Req, TX, Peer) -> %% Exclude successful requests with valid transactions from the %% IP-based throttling, to avoid connectivity issues at the times %% of excessive transaction volumes. - {A, B, C, D, _} = Peer, - ar_blacklist_middleware:decrement_ip_addr({A, B, C, D}, Req), + {A, B, C, D, _} = Peer, %% The IP address {A, B, C, D} is the peer key in the general rate limiter group. + arweave_limiter:reduce_for_peer(general, {A, B, C, D}), BodyReadTime = ar_http_req:body_read_time(Req), ar_peers:rate_gossiped_data(Peer, tx, erlang:convert_time_unit(BodyReadTime, native, microsecond), diff --git a/apps/arweave/src/ar_http_iface_rate_limiter_middleware.erl b/apps/arweave/src/ar_http_iface_rate_limiter_middleware.erl new file mode 100644 index 0000000000..6bb4fa501e --- /dev/null +++ b/apps/arweave/src/ar_http_iface_rate_limiter_middleware.erl @@ -0,0 +1,90 @@ +%%% +%%% @doc Cowboy handler to manage server-side rate limiting. +%%% +%%% This module provides a routing layer, mapping incoming requests +%%% to their respective rate limiter groups (RLGs). +%%% The mapping logic can be extended into something quite complex if +%%% required; keep in mind, however, that the execute function is +%%% called for every HTTP request. +%%% +%%% Also, nothing prevents the developer from consulting multiple RLGs +%%% for a single request, if necessary. +%%% +%%% The LimiterRef reference in the arweave_limiter:register_or_reject_call/2 +%%% call must match one of the RLGs started by the arweave_limiter application, +%%% otherwise a noproc error will be raised. +%%% +%%% We currently use IP addresses as keys for the calling peers. +%%% However, any Erlang term may be used as a key in an RLG. +%%% +-module(ar_http_iface_rate_limiter_middleware). + +-behaviour(cowboy_middleware). + +-export([execute/2]). + +-include_lib("arweave/include/ar.hrl"). +-include_lib("arweave_config/include/arweave_config.hrl"). + +execute(Req, Env) -> + LimiterRef = get_limiter_ref(Req), + PeerKey = get_peer_key(Req), + + case arweave_limiter:register_or_reject_call(LimiterRef, PeerKey) of + {reject, Reason, Data} -> + {stop, reject(Req, Reason, Data)}; + _ -> + {ok, Req, Env} + end.
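+%% Sketch (illustration, not part of this patch): the doc comment above notes
+%% that multiple RLGs may be consulted for a single request. A hypothetical
+%% helper, assuming the group names started by arweave_limiter_sup:
+%%
+%%   register_or_reject_all(LimiterRefs, PeerKey) ->
+%%       lists:foldl(
+%%           fun(Ref, ok) ->
+%%                   case arweave_limiter:register_or_reject_call(Ref, PeerKey) of
+%%                       {reject, Reason, Data} -> {reject, Reason, Data};
+%%                       _ -> ok
+%%                   end;
+%%              (_Ref, Rejection) ->
+%%                   %% Short-circuit: keep the first rejection.
+%%                   Rejection
+%%           end, ok, LimiterRefs).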
+ +get_limiter_ref(Req) -> + {ok, Config} = arweave_config:get_env(), + LocalIPs = [config_peer_to_ip_addr(Peer) || Peer <- Config#config.local_peers], + PeerIP = config_peer_to_ip_addr(get_peer_key(Req)), + + case lists:member(PeerIP, LocalIPs) of + true -> + local_peers; + _ -> + Path = ar_http_iface_server:split_path(cowboy_req:path(Req)), + path_to_limiter_ref(Path) + end. + +reject(Req, _Reason, _Data) -> + cowboy_req:reply( + 429, + #{}, + <<"Too Many Requests">>, + Req + ). + +get_peer_key(Req) -> + {{A, B, C, D}, _Port} = cowboy_req:peer(Req), + {A, B, C, D}. + +config_peer_to_ip_addr({{A, B, C, D}, _Port}) -> {A, B, C, D}; +config_peer_to_ip_addr({A, B, C, D, _Port}) -> {A, B, C, D}; +config_peer_to_ip_addr({A, B, C, D}) -> {A, B, C, D}. + +path_to_limiter_ref([<<"chunk">> | _]) -> chunk; +path_to_limiter_ref([<<"chunk2">> | _]) -> chunk; +path_to_limiter_ref([<<"data_sync_record">> | _]) -> data_sync_record; +path_to_limiter_ref([<<"recent_hash_list_diff">> | _]) -> recent_hash_list_diff; +path_to_limiter_ref([<<"hash_list">>]) -> block_index; +path_to_limiter_ref([<<"hash_list2">>]) -> block_index; +path_to_limiter_ref([<<"block_index">>]) -> block_index; +path_to_limiter_ref([<<"block_index2">>]) -> block_index; +path_to_limiter_ref([<<"block">>, _Type, _ID, <<"hash_list">>]) -> block_index; +path_to_limiter_ref([<<"wallet_list">>]) -> wallet_list; +path_to_limiter_ref([<<"block">>, _Type, _ID, <<"wallet_list">>]) -> wallet_list; +path_to_limiter_ref([<<"vdf">>]) -> get_vdf; +path_to_limiter_ref([<<"vdf">>, <<"session">>]) -> get_vdf_session; +path_to_limiter_ref([<<"vdf2">>, <<"session">>]) -> get_vdf_session; +path_to_limiter_ref([<<"vdf3">>, <<"session">>]) -> get_vdf_session; +path_to_limiter_ref([<<"vdf4">>, <<"session">>]) -> get_vdf_session; +path_to_limiter_ref([<<"vdf">>, <<"previous_session">>]) -> get_previous_vdf_session; +path_to_limiter_ref([<<"vdf2">>, <<"previous_session">>]) -> get_previous_vdf_session; +%% Note: there is no vdf3 previous_session entry in ?RPM_BY_PATH (ar_blacklist_middleware.hrl). +path_to_limiter_ref([<<"vdf4">>, <<"previous_session">>]) -> get_previous_vdf_session; +path_to_limiter_ref([<<"metrics">> | _]) -> metrics; +path_to_limiter_ref(_) -> general. diff --git a/apps/arweave/src/ar_http_iface_server.erl b/apps/arweave/src/ar_http_iface_server.erl index 7f8d3c06c2..45b97254a7 100644 --- a/apps/arweave/src/ar_http_iface_server.erl +++ b/apps/arweave/src/ar_http_iface_server.erl @@ -15,7 +15,7 @@ -include_lib("eunit/include/eunit.hrl"). -define(HTTP_IFACE_MIDDLEWARES, [ - ar_blacklist_middleware, + ar_http_iface_rate_limiter_middleware, ar_network_middleware, cowboy_router, ar_http_iface_middleware, diff --git a/apps/arweave/test/ar_http_iface_tests.erl b/apps/arweave/test/ar_http_iface_tests.erl index a0e321e821..df7cd9e872 100644 --- a/apps/arweave/test/ar_http_iface_tests.erl +++ b/apps/arweave/test/ar_http_iface_tests.erl @@ -24,6 +24,7 @@ start_node() -> reset_node() -> ar_blacklist_middleware:reset(), + arweave_limiter_sup:reset_all(), ar_test_node:remote_call(peer1, ar_blacklist_middleware, reset, []), ar_test_node:connect_to_peer(peer1).
@@ -281,18 +282,22 @@ test_single_regossip(_) -> test_node_blacklisting_get_spammer() -> {ok, Config} = arweave_config:get_env(), {RequestFun, ErrorResponse} = get_fun_msg_pair(get_info), + LimitWithBursts = Config#config.'http_api.limiter.general.sliding_window_limit' + + Config#config.'http_api.limiter.general.leaky_limit', node_blacklisting_test_frame( RequestFun, ErrorResponse, - Config#config.requests_per_minute_limit div 2 + 1, + LimitWithBursts, 1 ). test_node_blacklisting_post_spammer() -> {ok, Config} = arweave_config:get_env(), + LimitWithBursts = Config#config.'http_api.limiter.general.sliding_window_limit' + + Config#config.'http_api.limiter.general.leaky_limit', {RequestFun, ErrorResponse} = get_fun_msg_pair(send_tx_binary), NErrors = 11, - NRequests = Config#config.requests_per_minute_limit div 2 + NErrors, + NRequests = LimitWithBursts + NErrors, node_blacklisting_test_frame( RequestFun, ErrorResponse, @@ -333,6 +338,7 @@ send_tx_binary(Index, InvalidTX) -> -spec node_blacklisting_test_frame(fun(), any(), non_neg_integer(), non_neg_integer()) -> ok. node_blacklisting_test_frame(RequestFun, ErrorResponse, NRequests, ExpectedErrors) -> ar_blacklist_middleware:reset(), + arweave_limiter_sup:reset_all(), ar_rate_limiter:off(), Responses = ar_util:batch_pmap( RequestFun, @@ -342,13 +348,15 @@ node_blacklisting_test_frame(RequestFun, ErrorResponse, NRequests, ExpectedError ), ?assertEqual(length(Responses), NRequests), ar_blacklist_middleware:reset(), + arweave_limiter_sup:reset_all(), Got = count_by_response_type(ErrorResponse, Responses), %% Other test nodes may occasionally make some requests in the background disturbing the stats. Tolerance = 5, - ?debugFmt("ExpectedErrors: ~p, Tolerance: ~p, Got: ~p~n", [ExpectedErrors, Tolerance, maps:get(error_responses, Got)]), - ?assert(maps:get(error_responses, Got) =< ExpectedErrors + Tolerance), - ?assert(maps:get(error_responses, Got) >= ExpectedErrors - Tolerance), - ?assertEqual(NRequests - maps:get(error_responses, Got), maps:get(ok_responses, Got)), + ?debugFmt("Requests sent: ~p, ExpectedErrors: ~p, Tolerance: ~p, Got: ~p~n", + [NRequests, ExpectedErrors, Tolerance, maps:get(error_responses, Got, 0)]), + ?assert(maps:get(error_responses, Got, 0) =< ExpectedErrors + Tolerance), + ?assert(maps:get(error_responses, Got, 0) >= ExpectedErrors - Tolerance), + ?assertEqual(NRequests - maps:get(error_responses, Got, 0), maps:get(ok_responses, Got, 0)), ar_rate_limiter:on(). %% @doc Count the number of successful and error responses. @@ -756,7 +764,7 @@ test_get_tx_status(_) -> case FetchStatus() of {ok, {{<<"200">>, _}, _, _, _, _}} -> true; _ -> false - end + end end, 200, 5000 diff --git a/apps/arweave/test/ar_test_node.erl b/apps/arweave/test/ar_test_node.erl index c811563600..5ea1c119d9 100644 --- a/apps/arweave/test/ar_test_node.erl +++ b/apps/arweave/test/ar_test_node.erl @@ -302,7 +302,9 @@ start_node(B0, Config) -> start_node(B0, Config, WaitUntilSync) -> ?LOG_INFO("Starting node"), clean_up_and_stop(), + prometheus:start(), arweave_config:start(), + arweave_limiter:start(), {ok, BaseConfig} = arweave_config:get_env(), write_genesis_files(BaseConfig#config.data_dir, B0), update_config(Config), @@ -603,7 +605,9 @@ start() -> start(#{}). start(Options) when is_map(Options) -> + prometheus:start(), arweave_config:start(), + ok = arweave_limiter:start(), B0 = case maps:get(b0, Options, not_set) of not_set -> @@ -654,7 +658,9 @@ start(B0, RewardAddr, Config) -> %% Config after the test is done. 
Otherwise the tests that run after yours may fail. start(B0, RewardAddr, Config, StorageModules) -> clean_up_and_stop(), + prometheus:start(), arweave_config:start(), + arweave_limiter:start(), write_genesis_files(Config#config.data_dir, B0), ok = arweave_config:set_env(Config#config{ start_from_latest_state = true, diff --git a/apps/arweave_config/include/arweave_config.hrl b/apps/arweave_config/include/arweave_config.hrl index 09bb5211d7..3433d401cf 100644 --- a/apps/arweave_config/include/arweave_config.hrl +++ b/apps/arweave_config/include/arweave_config.hrl @@ -178,6 +178,112 @@ -define(DEFAULT_COWBOY_TCP_SEND_TIMEOUT, 15_000). -define(DEFAULT_COWBOY_TCP_LISTENER_SHUTDOWN, 5000). +%% Common RLG Settings +-define(DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, 120000). +-define(DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, 120000). +-define(DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, false). + +%% General RLG +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT, 150). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_DURATION, 1000). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT, 150). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL, 1000). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION, 30). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_CONCURRENCY_LIMIT, 150). + +%% Chunk RLG +-define(DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_LIMIT, 100). +-define(DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_DURATION, 1000). +-define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_LIMIT, 100). +-define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_INTERVAL, 1000). +-define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_REDUCTION, 30). +-define(DEFAULT_HTTP_API_LIMITER_CHUNK_CONCURRENCY_LIMIT, 200). + +%% Data Sync RLG +-define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_DURATION, 1000). +-define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_LIMIT, 20). +-define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_REDUCTION, 20). +-define(DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_CONCURRENCY_LIMIT, 40). + +%% Recent Hash List Diff RLG +-define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_DURATION, 1000). +-define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_LIMIT, 120). +-define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_REDUCTION, 120). +-define(DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_CONCURRENCY_LIMIT, 240). + +%% Block Index RLG +-define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_DURATION, 1000). +-ifdef(AR_TEST). +-define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_LIMIT, 10). +-else. +-define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_LIMIT, 1). +-endif. +-define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_REDUCTION, 1). +-define(DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_CONCURRENCY_LIMIT, 2). + +%% Wallet list RLG +-define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_DURATION, 1000). +-ifdef(AR_TEST). 
+-define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_LIMIT, 10). +-else. +-define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_LIMIT, 1). +-endif. +-define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_REDUCTION, 1). +-define(DEFAULT_HTTP_API_LIMITER_WALLET_LIST_CONCURRENCY_LIMIT, 2). + +%% Get VDF RLG +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_DURATION, 1000). +-ifdef(AR_TEST). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_LIMIT, 4500). +-else. +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_LIMIT, 90). +-endif. +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_REDUCTION, 90). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_CONCURRENCY_LIMIT, 90). + +%% VDF Session RLG +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_DURATION, 1000). +-ifdef(AR_TEST). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 4500). +-else. +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 30). +-endif. +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_REDUCTION, 30). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_CONCURRENCY_LIMIT, 30). + +%% Previous VDF Session RLG +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_DURATION, 1000). +-ifdef(AR_TEST). +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 4500). +-else. +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 30). +-endif. +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_REDUCTION, 30). +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_CONCURRENCY_LIMIT, 30). + +%% Metrics RLG +-define(DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_LIMIT, 0). +-define(DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_DURATION, 1000). +-define(DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_LIMIT, 2). +-define(DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_INTERVAL, 1000). +-define(DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_REDUCTION, 2). +-define(DEFAULT_HTTP_API_LIMITER_METRICS_CONCURRENCY_LIMIT, 2). + + %% @doc Startup options with default values. 
-record(config, { init = false, @@ -323,7 +429,199 @@ 'http_api.tcp.nodelay' = ?DEFAULT_COWBOY_TCP_NODELAY, 'http_api.tcp.num_acceptors' = ?DEFAULT_COWBOY_TCP_NUM_ACCEPTORS, 'http_api.tcp.send_timeout_close' = ?DEFAULT_COWBOY_TCP_SEND_TIMEOUT_CLOSE, - 'http_api.tcp.send_timeout' = ?DEFAULT_COWBOY_TCP_SEND_TIMEOUT + 'http_api.tcp.send_timeout' = ?DEFAULT_COWBOY_TCP_SEND_TIMEOUT, + + 'http_api.limiter.general.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.general.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_DURATION, + 'http_api.limiter.general.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.general.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT, + 'http_api.limiter.general.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL, + 'http_api.limiter.general.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION, + 'http_api.limiter.general.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_GENERAL_CONCURRENCY_LIMIT, + 'http_api.limiter.general.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.chunk.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.chunk.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_DURATION, + 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.chunk.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_LIMIT, + 'http_api.limiter.chunk.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_INTERVAL, + 'http_api.limiter.chunk.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_REDUCTION, + 'http_api.limiter.chunk.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_CHUNK_CONCURRENCY_LIMIT, + 'http_api.limiter.chunk.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.data_sync_record.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.data_sync_record.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_SLIDING_WINDOW_DURATION, + 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.data_sync_record.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_LIMIT, + 'http_api.limiter.data_sync_record.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_INTERVAL, + 'http_api.limiter.data_sync_record.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_LEAKY_TICK_REDUCTION, + 'http_api.limiter.data_sync_record.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_DATA_SYNC_RECORD_CONCURRENCY_LIMIT, + 'http_api.limiter.data_sync_record.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 
'http_api.limiter.recent_hash_list_diff.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.recent_hash_list_diff.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_SLIDING_WINDOW_DURATION, + 'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.recent_hash_list_diff.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_LIMIT, + 'http_api.limiter.recent_hash_list_diff.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_INTERVAL, + 'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_LEAKY_TICK_REDUCTION, + 'http_api.limiter.recent_hash_list_diff.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_RECENT_HASH_LIST_DIFF_CONCURRENCY_LIMIT, + 'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.block_index.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.block_index.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_SLIDING_WINDOW_DURATION, + 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.block_index.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_LIMIT, + 'http_api.limiter.block_index.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_INTERVAL, + 'http_api.limiter.block_index.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_LEAKY_TICK_REDUCTION, + 'http_api.limiter.block_index.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_BLOCK_INDEX_CONCURRENCY_LIMIT, + 'http_api.limiter.block_index.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.wallet_list.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.wallet_list.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_SLIDING_WINDOW_DURATION, + 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.wallet_list.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_LIMIT, + 'http_api.limiter.wallet_list.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_INTERVAL, + 'http_api.limiter.wallet_list.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_LEAKY_TICK_REDUCTION, + 'http_api.limiter.wallet_list.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_WALLET_LIST_CONCURRENCY_LIMIT, + 'http_api.limiter.wallet_list.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.get_vdf.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.get_vdf.sliding_window_duration' = + 
?DEFAULT_HTTP_API_LIMITER_GET_VDF_SLIDING_WINDOW_DURATION, + 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.get_vdf.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_LIMIT, + 'http_api.limiter.get_vdf.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_INTERVAL, + 'http_api.limiter.get_vdf.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_LEAKY_TICK_REDUCTION, + 'http_api.limiter.get_vdf.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_CONCURRENCY_LIMIT, + 'http_api.limiter.get_vdf.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.get_vdf_session.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.get_vdf_session.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_DURATION, + 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.get_vdf_session.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, + 'http_api.limiter.get_vdf_session.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_INTERVAL, + 'http_api.limiter.get_vdf_session.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_TICK_REDUCTION, + 'http_api.limiter.get_vdf_session.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_CONCURRENCY_LIMIT, + 'http_api.limiter.get_vdf_session.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.get_previous_vdf_session.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.get_previous_vdf_session.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_DURATION, + 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL, + 'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry' = + ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY, + 'http_api.limiter.get_previous_vdf_session.leaky_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, + 'http_api.limiter.get_previous_vdf_session.leaky_tick_interval' = + ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_INTERVAL, + 'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction' = + ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_TICK_REDUCTION, + 'http_api.limiter.get_previous_vdf_session.concurrency_limit' = + ?DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_CONCURRENCY_LIMIT, + 'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled' = + ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, + + 'http_api.limiter.metrics.sliding_window_limit' = + ?DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_LIMIT, + 'http_api.limiter.metrics.sliding_window_duration' = + ?DEFAULT_HTTP_API_LIMITER_METRICS_SLIDING_WINDOW_DURATION, + 'http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval' = + 
?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL,
+    'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry' =
+        ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY,
+    'http_api.limiter.metrics.leaky_limit' =
+        ?DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_LIMIT,
+    'http_api.limiter.metrics.leaky_tick_interval' =
+        ?DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_INTERVAL,
+    'http_api.limiter.metrics.leaky_tick_reduction' =
+        ?DEFAULT_HTTP_API_LIMITER_METRICS_LEAKY_TICK_REDUCTION,
+    'http_api.limiter.metrics.concurrency_limit' =
+        ?DEFAULT_HTTP_API_LIMITER_METRICS_CONCURRENCY_LIMIT,
+    'http_api.limiter.metrics.is_manual_reduction_disabled' =
+        ?DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED
+
 }).
 -endif.
diff --git a/apps/arweave_limiter/include/.gitkeep b/apps/arweave_limiter/include/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/apps/arweave_limiter/priv/.gitkeep b/apps/arweave_limiter/priv/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/apps/arweave_limiter/src/arweave_limiter.app.src b/apps/arweave_limiter/src/arweave_limiter.app.src
new file mode 100644
index 0000000000..af163ae6ed
--- /dev/null
+++ b/apps/arweave_limiter/src/arweave_limiter.app.src
@@ -0,0 +1,30 @@
+{application, arweave_limiter,
+ [
+  {id, "arweave_limiter"},
+  {description, "Arweave Rate Limiter"},
+  {vsn, "0.0.1"},
+  {mod, {arweave_limiter, []}},
+  {env, []},
+  {applications, [
+   kernel,
+   stdlib,
+   sasl,
+   arweave_config,
+   prometheus,
+   prometheus_cowboy,
+   prometheus_process_collector,
+   prometheus_httpd,
+   runtime_tools
+  ]},
+  {modules, [
+   arweave_limiter,
+   arweave_limiter_sup,
+   arweave_limiter_time,
+   arweave_limiter_metrics,
+   arweave_limiter_metrics_collector
+  ]},
+  {registered, [
+   arweave_limiter,
+   arweave_limiter_sup
+  ]}
+ ]}.
diff --git a/apps/arweave_limiter/src/arweave_limiter.erl b/apps/arweave_limiter/src/arweave_limiter.erl
new file mode 100644
index 0000000000..415f29ce97
--- /dev/null
+++ b/apps/arweave_limiter/src/arweave_limiter.erl
@@ -0,0 +1,86 @@
+%%%===================================================================
+%%% GNU General Public License, version 2 (GPL-2.0)
+%%% The GNU General Public License (GPL-2.0)
+%%% Version 2, June 1991
+%%%
+%%% ------------------------------------------------------------------
+%%%
+%%% @copyright 2025 (c) Arweave
+%%% @author Arweave Team
+%%% @author Kristof Hetzl
+%%% @doc Arweave Rate Limiter.
+%%%
+%%% The `arweave_limiter' module is the interface to the Arweave
+%%% Rate Limiter functionality.
+%%%
+%%% @end
+%%%===================================================================
+-module(arweave_limiter).
+-vsn(1).
+-behavior(application).
+-export([
+    start/0,
+    start/2,
+    stop/0,
+    stop/1
+    ]).
+
+-export([register_or_reject_call/2, reduce_for_peer/2]).
+
+-include_lib("kernel/include/logger.hrl").
+
+
+%%--------------------------------------------------------------------
+%% @doc Helper function to start the `arweave_limiter' application.
+%% @end
+%%--------------------------------------------------------------------
+-spec start() -> ok | {error, term()}.
+
+start() ->
+    case application:ensure_all_started(?MODULE, permanent) of
+        {ok, Dependencies} ->
+            ?LOG_DEBUG("arweave_limiter started dependencies: ~p", [Dependencies]),
+            ok;
+        Elsewise ->
+            Elsewise
+    end.
+
+%%--------------------------------------------------------------------
+%% @doc Application API function to start the `arweave_limiter' app.
+%% @end
+%%--------------------------------------------------------------------
+-spec start(term(), term()) -> {ok, pid()}.
+start(_StartType, _StartArgs) ->
+    arweave_limiter_sup:start_link().
+
+%%--------------------------------------------------------------------
+%% @doc Helper function to stop the `arweave_limiter' application.
+%% @end
+%%--------------------------------------------------------------------
+-spec stop() -> ok.
+
+stop() ->
+    application:stop(?MODULE).
+
+%%--------------------------------------------------------------------
+%% @doc Application API function to stop the `arweave_limiter' app.
+%% @end
+%%--------------------------------------------------------------------
+-spec stop(term()) -> ok.
+stop(_State) ->
+    ok.
+
+%%--------------------------------------------------------------------
+%% @doc Register a call with the rate limiter, or reject it.
+%% @end
+%%--------------------------------------------------------------------
+register_or_reject_call(LimiterRef, Peer) ->
+    arweave_limiter_group:register_or_reject_call(LimiterRef, Peer).
+
+
+%%--------------------------------------------------------------------
+%% @doc Reduce leaky tokens for peer.
+%% @end
+%%--------------------------------------------------------------------
+reduce_for_peer(LimiterRef, Peer) ->
+    arweave_limiter_group:reduce_for_peer(LimiterRef, Peer).
diff --git a/apps/arweave_limiter/src/arweave_limiter_group.erl b/apps/arweave_limiter/src/arweave_limiter_group.erl
new file mode 100644
index 0000000000..6bd7173cad
--- /dev/null
+++ b/apps/arweave_limiter/src/arweave_limiter_group.erl
@@ -0,0 +1,379 @@
+%%%
+%%% @doc Leaky bucket token rate limiter based on
+%%% https://gist.github.com/humaite/21a84c3b3afac07fcebe476580f3a40b
+%%% combined with a concurrency limiter similar to Ranch's connection pool.
+%%% The leaky bucket limiter sits on top of a sliding window limiter.
+%%%
+%%% Concurrency is validated first, then the sliding window, followed by
+%%% the leaky bucket. If the sliding window check passes, the call is
+%%% accepted; otherwise it burns leaky tokens, and if those are exhausted
+%%% as well, the call is rejected.
+%%% All data is stored in process memory only.
+%%%
+-module(arweave_limiter_group).
+
+-behaviour(gen_server).
+
+%% API
+-export([
+    start_link/2,
+    info/1,
+    config/1,
+    register_or_reject_call/2,
+    reduce_for_peer/2,
+    reset_all/1,
+    stop/1
+    ]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+    terminate/2, code_change/3, format_status/2]).
+
+-ifdef(AR_TEST).
+-export([
+    expire_and_get_requests/4,
+    drop_expired/3,
+    add_and_order_timestamps/2,
+    cleanup_expired_sliding_peers/3]).
+-endif.
+
+-include_lib("arweave/include/ar.hrl").
+-include_lib("arweave_config/include/arweave_config.hrl").
+
+%%% API
+start_link(LimiterRef, Config) ->
+    gen_server:start_link({local, LimiterRef}, ?MODULE, [Config], []).
+
+info(LimiterRef) ->
+    gen_server:call(LimiterRef, get_info).
+
+config(LimiterRef) ->
+    gen_server:call(LimiterRef, get_config).
+
+register_or_reject_call(LimiterRef, Peer) ->
+    {Time, Value} = timer:tc(fun do_register_or_reject_call/2, [LimiterRef, Peer]),
+    prometheus_histogram:observe(ar_limiter_response_time_microseconds, [atom_to_list(LimiterRef)], Time),
+    Value.
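+
+%% Usage sketch (illustrative, not a definitive integration): how a request
+%% handler might consume this API. `general' is one of the limiter ids wired
+%% up in arweave_limiter_sup; `handle/1' and `too_many_requests/1' are
+%% hypothetical callbacks.
+%%
+%%   maybe_handle(Peer, Req) ->
+%%       case arweave_limiter:register_or_reject_call(general, Peer) of
+%%           {register, _How} ->
+%%               %% Accepted via the sliding window, the leaky bucket,
+%%               %% or because no limiting applies to this group.
+%%               handle(Req);
+%%           {reject, _Reason, _Data} ->
+%%               %% Concurrency cap hit, or window full and tokens exhausted.
+%%               too_many_requests(Req)
+%%       end.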
+
+do_register_or_reject_call(LimiterRef, Peer) ->
+    prometheus_counter:inc(ar_limiter_requests_total,
+        [atom_to_list(LimiterRef)]),
+    case gen_server:call(LimiterRef, {register_or_reject, Peer}) of
+        {reject, Reason, _Data} = Rejection ->
+            prometheus_counter:inc(ar_limiter_rejected_total,
+                [atom_to_list(LimiterRef), atom_to_list(Reason)]),
+            Rejection;
+        Accept ->
+            Accept
+    end.
+
+%% Called when a transaction is accepted; this mirrors how the previous
+%% solution dealt with high load. Note that it results in a double
+%% reduction, since the periodic tick reduction keeps occurring as well.
+reduce_for_peer(LimiterRef, Peer) ->
+    Result = gen_server:call(LimiterRef, {reduce_for_peer, Peer}),
+    Result == ok andalso prometheus_counter:inc(ar_limiter_reduce_requests_total,
+        [atom_to_list(LimiterRef)]),
+    Result.
+
+reset_all(LimiterRef) ->
+    whereis(LimiterRef) == undefined orelse gen_server:call(LimiterRef, reset_all).
+
+stop(LimiterRef) ->
+    gen_server:stop(LimiterRef).
+
+%% gen_server callbacks
+init([Config] = _Args) ->
+    Id = maps:get(id, Config),
+
+    IsDisabled = maps:get(no_limit, Config, false),
+    IsManualReductionDisabled = maps:get(is_manual_reduction_disabled, Config, false),
+
+    LeakyTickMs = maps:get(leaky_tick_interval_ms, Config, ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL),
+    TimestampCleanupTickMs = maps:get(timestamp_cleanup_interval_ms, Config,
+        ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL),
+    TimestampCleanupExpiry = maps:get(timestamp_cleanup_expiry, Config,
+        ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY),
+    LeakyRateLimit = maps:get(leaky_rate_limit, Config, ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT),
+    ConcurrencyLimit = maps:get(concurrency_limit, Config, ?DEFAULT_HTTP_API_LIMITER_GENERAL_CONCURRENCY_LIMIT),
+    TickReduction = maps:get(tick_reduction, Config,
+        ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION),
+    SlidingWindowDuration = maps:get(sliding_window_duration, Config,
+        ?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_DURATION),
+    SlidingWindowLimit = maps:get(sliding_window_limit, Config,
+        ?DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT),
+
+    {ok, LeakyRef} = timer:send_interval(LeakyTickMs, self(), {tick, leaky_bucket_reduction}),
+    {ok, TsRef} = timer:send_interval(TimestampCleanupTickMs, self(), {tick, sliding_window_timestamp_cleanup}),
+    {ok, #{
+        id => atom_to_list(Id),
+        is_disabled => IsDisabled,
+        is_manual_reduction_disabled => IsManualReductionDisabled,
+        leaky_tick_timer_ref => LeakyRef,
+        timestamp_cleanup_timer_ref => TsRef,
+        leaky_tick_ms => LeakyTickMs,
+        timestamp_cleanup_tick_ms => TimestampCleanupTickMs,
+        timestamp_cleanup_expiry => TimestampCleanupExpiry,
+        tick_reduction => TickReduction,
+        leaky_rate_limit => LeakyRateLimit,
+        concurrency_limit => ConcurrencyLimit,
+        concurrent_requests => #{}, %% Peer -> List of {MonitorRef, Pid}
+        concurrent_monitors => #{}, %% MonitorRef -> Peer
+        leaky_tokens => #{}, %% Peer -> Leaky Bucket tokens
+        sliding_window_duration => SlidingWindowDuration,
+        sliding_window_limit => SlidingWindowLimit,
+        sliding_timestamps => #{} %% Peer -> Ordered list of timestamps
+    }}.
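+
+%% Illustrative sketch of the configuration map init/1 consumes, with a
+%% hypothetical id and invented values; omitted keys fall back to the
+%% `general' defaults from arweave_config.hrl:
+%%
+%%   {ok, _Pid} = arweave_limiter_group:start_link(my_limiter, #{
+%%       id => my_limiter,
+%%       sliding_window_limit => 10,       %% accepted per window, per peer
+%%       sliding_window_duration => 1000,  %% window length, ms
+%%       leaky_rate_limit => 20,           %% burst tokens beyond the window
+%%       leaky_tick_interval_ms => 1000,   %% how often the bucket leaks
+%%       tick_reduction => 5,              %% tokens leaked per tick, per peer
+%%       concurrency_limit => 50,          %% in-flight call hard cap
+%%       timestamp_cleanup_interval_ms => 120000,
+%%       timestamp_cleanup_expiry => 120000}).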
+ +handle_call(reset_all, _From, State) -> + {reply, ok, State#{concurrent_requests => #{}, + concurrent_monitors => #{}, + leaky_tokens => #{}, + sliding_timestamps => #{}}}; +handle_call({register_or_reject, Peer}, {FromPid, _}, + State = #{id := Id, + is_disabled := IsDisabled, + leaky_rate_limit := LeakyRateLimit, + leaky_tokens := LeakyTokens, + concurrency_limit := ConcurrencyLimit, + concurrent_requests := ConcurrentRequests, + concurrent_monitors := ConcurrentMonitors, + sliding_window_duration := SlidingWindowDuration, + sliding_window_limit := SlidingWindowLimit, + sliding_timestamps := SlidingTimestamps + }) -> + Now = arweave_limiter_time:ts_now(), + Tokens = maps:get(Peer, LeakyTokens, 0) + 1, + Concurrency = length(maps:get(Peer, ConcurrentRequests, [])) + 1, + + SlidingTimestampsForPeer0 = + expire_and_get_requests(Peer, SlidingTimestamps, SlidingWindowDuration, Now), + case IsDisabled of + true -> + {reply, {register, no_limiting_applied}, State}; + _ -> + case Concurrency > ConcurrencyLimit of + true -> + %% Concurrency Hard Limit + ?LOG_DEBUG([{event, ar_limiter_reject}, {reason, concurrency}, + {peer, Peer}, {id, Id}]), + {reply, {reject, concurrency, data}, State}; + _ -> + case length(SlidingTimestampsForPeer0) + 1 > SlidingWindowLimit of + true -> + %% Sliding Window limited, check Leaky Bucket Tokens + case Tokens > LeakyRateLimit of + true -> + %% Burst exhausted with the Leaky Tokens + ?LOG_DEBUG([{event, ar_limiter_reject}, {reason, rate_limit}, + {sliding_window_limit, SlidingWindowLimit}, + {leaky_rate_limit, LeakyRateLimit}, + {peer, Peer}, {id, Id}]), + {reply, {reject, rate_limit, data}, State}; + false -> + NewLeakyTokens = update_token(Peer, Tokens, LeakyTokens), + {NewRequests, NewMonitors} = + register_concurrent( + Peer, FromPid, ConcurrentRequests, ConcurrentMonitors), + {reply, {register, leaky}, + State#{leaky_tokens => NewLeakyTokens, + concurrent_requests => NewRequests, + concurrent_monitors => NewMonitors}} + end; + _ -> + {NewRequests, NewMonitors} = + register_concurrent( + Peer, FromPid, ConcurrentRequests, ConcurrentMonitors), + SlidingTimestampsForPeer1 = add_and_order_timestamps(Now, SlidingTimestampsForPeer0), + NewSlidingTimestamps = SlidingTimestamps#{Peer => SlidingTimestampsForPeer1}, + {reply, {register, sliding}, State#{sliding_timestamps => NewSlidingTimestamps, + concurrent_requests => NewRequests, + concurrent_monitors => NewMonitors}} + end + end + end; +handle_call({reduce_for_peer, Peer}, _From, State = + #{is_manual_reduction_disabled := false, + leaky_tokens := LeakyTokens}) -> + NewLeakyTokens = do_reduce_for_peer(Peer, LeakyTokens), + {reply, ok, State#{leaky_tokens => NewLeakyTokens}}; +handle_call({reduce_for_peer, _Peer}, _From, State = + #{is_manual_reduction_disabled := true}) -> + {reply, disabled, State}; +handle_call(get_info, _From, State = + #{sliding_timestamps := SlidingTimestamps, + leaky_tokens := LeakyTokens, + concurrent_requests := ConcurrentRequests, + concurrent_monitors := ConcurrentMonitors}) -> + {reply, #{sliding_timestamps => SlidingTimestamps, + leaky_tokens => LeakyTokens, + concurrent_requests => ConcurrentRequests, + concurrent_monitors => ConcurrentMonitors}, State}; +handle_call(get_config, _From, State) -> + {reply, filter_state_for_config(State), State}; +handle_call(Request, From, State = #{id := Id}) -> + ?LOG_WARNING([{event, unhandled_call}, {id, Id}, {module, ?MODULE}, + {request, Request}, {from, From}, + {config, filter_state_for_config(State)}]), + {reply, ok, State}. 
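+
+%% Worked example (hypothetical numbers) of the decision order above: with
+%% concurrency_limit = 100, sliding_window_limit = 2 per 1000 ms window and
+%% leaky_rate_limit = 5, a burst of 8 calls from one peer within one window
+%% resolves as
+%%
+%%   calls 1-2 -> {register, sliding}          %% window has room
+%%   calls 3-7 -> {register, leaky}            %% window full; burns tokens
+%%   call  8   -> {reject, rate_limit, data}   %% tokens exhausted too
+%%
+%% Had 100 of those calls still been in flight, the next call would have
+%% been rejected with {reject, concurrency, data} before any window check.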
+
+handle_cast(_Request, State) ->
+    {noreply, State}.
+
+handle_info({tick, sliding_window_timestamp_cleanup},
+        State = #{id := Id, sliding_timestamps := SlidingTimestamps,
+            timestamp_cleanup_expiry := CleanupExpiry}) ->
+    Now = arweave_limiter_time:ts_now(),
+    NewSlidingTimestamps = cleanup_expired_sliding_peers(SlidingTimestamps, CleanupExpiry, Now),
+    Deleted = maps:size(SlidingTimestamps) - maps:size(NewSlidingTimestamps),
+    prometheus_counter:inc(ar_limiter_cleanup_tick_expired_sliding_peers_deleted_total, [Id], Deleted),
+    {noreply, State#{sliding_timestamps => NewSlidingTimestamps}};
+handle_info({tick, leaky_bucket_reduction},
+        State = #{id := Id, tick_reduction := TickReduction, leaky_tokens := LeakyTokens}) ->
+    %% Counting per-peer reductions directly is more precise than
+    %% approximating with ar_limiter_leaky_ticks * ar_limiter_peers.
+    prometheus_counter:inc(ar_limiter_leaky_ticks, [Id]),
+    SizeBefore = maps:size(LeakyTokens),
+    prometheus_counter:inc(ar_limiter_leaky_tick_reductions_peer, [Id], SizeBefore),
+    NewTokens =
+        maps:fold(fun(Key, Value, AccIn) ->
+            fold_decrease_rate(Id, Key, Value, AccIn, TickReduction)
+        end, #{}, LeakyTokens),
+    prometheus_counter:inc(
+        ar_limiter_leaky_tick_delete_peer_total, [Id], SizeBefore - maps:size(NewTokens)),
+    {noreply, State#{leaky_tokens => NewTokens}};
+handle_info({'DOWN', MonitorRef, process, Pid, Reason},
+        State = #{concurrent_requests := ConcurrentRequests,
+            concurrent_monitors := ConcurrentMonitors}) ->
+    {NewConcurrentRequests, NewConcurrentMonitors} =
+        remove_concurrent(
+            MonitorRef, Pid, Reason, ConcurrentRequests, ConcurrentMonitors),
+    {noreply, State#{concurrent_requests => NewConcurrentRequests,
+        concurrent_monitors => NewConcurrentMonitors}};
+handle_info(Info, State = #{id := Id}) ->
+    ?LOG_WARNING([{event, unhandled_info}, {id, Id}, {module, ?MODULE}, {info, Info}]),
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+format_status(_Opt, Status) ->
+    Status.
+
+%%% Internal functions
+
+%% Sliding window manipulation
+expire_and_get_requests(Peer, SlidingTimestamps, SlidingWindowDuration, Now) ->
+    Timestamps = maps:get(Peer, SlidingTimestamps, []),
+    drop_expired(Timestamps, SlidingWindowDuration, Now).
+
+drop_expired([TS|Timestamps], WindowDuration, Now) when TS + WindowDuration =< Now ->
+    drop_expired(Timestamps, WindowDuration, Now);
+drop_expired(Timestamps, _WindowDuration, _Now) ->
+    Timestamps.
+
+%% There is no idiomatic way of appending an element to the end of a list
+%% in Erlang, so we reverse the list, add the element to the front, and
+%% reverse it again.
+add_and_order_timestamps(Ts, Timestamps) ->
+    lists:reverse(do_add_and_order_timestamps(Ts, lists:reverse(Timestamps))).
+
+do_add_and_order_timestamps(Ts, []) ->
+    [Ts];
+do_add_and_order_timestamps(Ts, [Head | _Rest] = Timestamps) when Ts >= Head ->
+    [Ts | Timestamps];
+do_add_and_order_timestamps(Ts, [Head | Rest]) ->
+    %% This clause shouldn't really be reached, because we use monotonic
+    %% time for timestamps.
+    [Head | do_add_and_order_timestamps(Ts, Rest)].
+
+cleanup_expired_sliding_peers(SlidingTimestamps, WindowDuration, Now) ->
+    maps:fold(fun(Peer, TsList, AccIn) ->
+        case drop_expired(TsList, WindowDuration, Now) of
+            [] ->
+                AccIn;
+            ValidTimestamps ->
+                AccIn#{Peer => ValidTimestamps}
+        end
+    end, #{}, SlidingTimestamps).
+
+%% Token manipulation
+update_token(Peer, Token, LeakyToken) ->
+    maps:put(Peer, Token, LeakyToken).
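+
+%% Worked evaluations of the sliding-window helpers above (millisecond
+%% timestamps; `p1' and `p2' stand in for peer IP tuples). The same cases
+%% are exercised in arweave_limiter_group_tests:
+%%
+%%   drop_expired([1, 500, 501], 1000, 1100)  -> [500, 501]  %% 1 + 1000 =< 1100
+%%   add_and_order_timestamps(5, [7, 8])      -> [5, 7, 8]
+%%   cleanup_expired_sliding_peers(#{p1 => [1], p2 => [500]}, 1000, 1100)
+%%                                            -> #{p2 => [500]}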
+ +do_reduce_for_peer(Peer, LeakyTokens) -> + case maps:get(Peer, LeakyTokens, 0) of + 0 -> + LeakyTokens; + Tokens -> + LeakyTokens#{Peer => Tokens - 1} + end. + +fold_decrease_rate(_Id, _Key, Counter, Acc, _TickReduction) + when is_integer(Counter), Counter =< 0 -> + Acc; +fold_decrease_rate(Id, Key, Counter, Acc, TickReduction) when Counter < TickReduction -> + prometheus_counter:inc(ar_limiter_leaky_tick_token_reductions_total, [Id], Counter), + maps:put(Key, 0, Acc); +fold_decrease_rate(Id, Key, Counter, Acc, TickReduction) -> + prometheus_counter:inc(ar_limiter_leaky_tick_token_reductions_total, [Id], TickReduction), + maps:put(Key, Counter-TickReduction, Acc). + +%% Concurrency magic +register_concurrent(Peer, Pid, ConcurrentRequests, ConcurrentMonitors) -> + MonitorRef = erlang:monitor(process, Pid), + Processes = maps:get(Peer, ConcurrentRequests, []), + NewConcurrentRequests = maps:put(Peer, [{MonitorRef, Pid} | Processes], ConcurrentRequests), + NewConcurrentMonitors = maps:put(MonitorRef, Peer, ConcurrentMonitors), + {NewConcurrentRequests, NewConcurrentMonitors}. + +remove_concurrent(MonitorRef, _Pid, _Reason, ConcurrentRequests, ConcurrentMonitors) -> + %% Peer for a MonitorRef shouldn't be undefined, because we started to + %% monitor the process as a first thing when register was called. + case maps:get(MonitorRef, ConcurrentMonitors, not_found) of + not_found -> + %% MonitorRef not found. This happens when we reset all the peers + %% manually. This also means everything else has been deleted as well. + %% Nothing to do, just return the current state. + {ConcurrentRequests, ConcurrentMonitors}; + Peer -> + ConcurrentForPeer = maps:get(Peer, ConcurrentRequests), + NewConcurrentForPeer = proplists:delete(MonitorRef, ConcurrentForPeer), + NewConcurrentRequests = + case NewConcurrentForPeer of + [] -> + maps:remove(Peer, ConcurrentRequests); + _ -> + ConcurrentRequests#{Peer => NewConcurrentForPeer} + end, + NewConcurrentMonitors = maps:remove(MonitorRef, ConcurrentMonitors), + {NewConcurrentRequests, NewConcurrentMonitors} + end. + +filter_state_for_config(#{id := Id, + is_disabled := IsDisabled, + is_manual_reduction_disabled := IsManualReductionDisabled, + leaky_tick_timer_ref := LeakyRef, + timestamp_cleanup_timer_ref := TsRef, + leaky_tick_ms := LeakyTickMs, + timestamp_cleanup_tick_ms := TimestampCleanupTickMs, + timestamp_cleanup_expiry := TimestampCleanupExpiry, + tick_reduction := TickReduction, + leaky_rate_limit := LeakyRateLimit, + concurrency_limit := ConcurrencyLimit, + sliding_window_duration := SlidingWindowDuration, + sliding_window_limit := SlidingWindowLimit}) -> + #{id => Id, + is_disabled => IsDisabled, + is_manual_reduction_disabled => IsManualReductionDisabled, + leaky_tick_timer_ref => LeakyRef, + timestamp_cleanup_timer_ref => TsRef, + leaky_tick_ms => LeakyTickMs, + timestamp_cleanup_tick_ms => TimestampCleanupTickMs, + timestamp_cleanup_expiry => TimestampCleanupExpiry, + tick_reduction => TickReduction, + leaky_rate_limit => LeakyRateLimit, + concurrency_limit => ConcurrencyLimit, + sliding_window_duration => SlidingWindowDuration, + sliding_window_limit => SlidingWindowLimit}. diff --git a/apps/arweave_limiter/src/arweave_limiter_metrics.erl b/apps/arweave_limiter/src/arweave_limiter_metrics.erl new file mode 100644 index 0000000000..08f0b3273e --- /dev/null +++ b/apps/arweave_limiter/src/arweave_limiter_metrics.erl @@ -0,0 +1,64 @@ +-module(arweave_limiter_metrics). + +-export([register/0]). 
+
+%%%===================================================================
+%%% Public interface.
+%%%===================================================================
+
+%% @doc Declare Arweave Rate Limiter metrics.
+register() ->
+    ok = prometheus_histogram:new([
+        {name, ar_limiter_response_time_microseconds},
+        {help, "Time it took for the limiter to respond to requests"},
+        %% buckets might be reduced for production
+        {buckets, [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50]},
+        {labels, [limiter_id]}]),
+
+    ok = prometheus_counter:new([
+        {name, ar_limiter_requests_total},
+        {help, "The number of requests the limiter has processed"},
+        {labels, [limiter_id]}]),
+    ok = prometheus_counter:new([{name, ar_limiter_rejected_total},
+        {help, "The number of requests that were rejected by the limiter"},
+        {labels, [limiter_id, reason]}
+    ]),
+    ok = prometheus_counter:new([{name, ar_limiter_reduce_requests_total},
+        {help, "The total number of manual reduce requests"},
+        {labels, [limiter_id]}
+    ]),
+    ok = prometheus_gauge:new([
+        {name, ar_limiter_peers},
+        {help, "The number of peers the limiter is currently monitoring"},
+        %% limiting type:
+        %% sliding_window -> baseline, leaky_bucket -> burst, concurrency -> concurrency
+        {labels, [limiter_id, limiting_type]}]),
+    ok = prometheus_gauge:new([
+        {name, ar_limiter_tracked_items_total},
+        {help, "The number of timestamps, leaky tokens, and concurrent processes being tracked"},
+        %% limiting type:
+        %% sliding_window -> baseline, leaky_bucket -> burst, concurrency -> concurrency
+        {labels, [limiter_id, limiting_type]}]),
+    ok = prometheus_counter:new([
+        {name, ar_limiter_leaky_ticks},
+        {help, "The number of leaky bucket ticks the limiter has processed"},
+        {labels, [limiter_id]}]),
+    ok = prometheus_counter:new([
+        {name, ar_limiter_leaky_tick_delete_peer_total},
+        {help, "The number of times a peer has been dropped from the leaky bucket token register"},
+        {labels, [limiter_id]}]),
+    ok = prometheus_counter:new([
+        {name, ar_limiter_cleanup_tick_expired_sliding_peers_deleted_total},
+        {help, "The number of times a peer has been dropped from the sliding window timestamp register"},
+        {labels, [limiter_id]}]),
+    ok = prometheus_counter:new([
+        %% To show how many tokens clients are burning for bursts.
+        {name, ar_limiter_leaky_tick_token_reductions_total},
+        {help, "The total number of consumed leaky bucket tokens reduced across all peers"},
+        {labels, [limiter_id]}]),
+    ok = prometheus_counter:new([
+        %% To see how many peers dip into their burst tokens in a period.
+        {name, ar_limiter_leaky_tick_reductions_peer},
+        {help, "The number of times a leaky bucket token reduction had to be performed for a peer"},
+        {labels, [limiter_id]}]),
+    ok.
diff --git a/apps/arweave_limiter/src/arweave_limiter_metrics_collector.erl b/apps/arweave_limiter/src/arweave_limiter_metrics_collector.erl
new file mode 100644
index 0000000000..f24ab22a98
--- /dev/null
+++ b/apps/arweave_limiter/src/arweave_limiter_metrics_collector.erl
@@ -0,0 +1,85 @@
+-module(arweave_limiter_metrics_collector).
+
+-behaviour(prometheus_collector).
+
+-export([
+    deregister_cleanup/1,
+    collect_mf/2
+]).
+
+-ifdef(AR_TEST).
+-export([
+    metrics/0,
+    tracked_items/1,
+    peers/1
+    ]).
+-endif.
+
+-import(prometheus_model_helpers, [create_mf/4]).
+
+-include_lib("prometheus/include/prometheus.hrl").
+-define(METRIC_NAME_PREFIX, "arweave_").
+
+%% ===================================================================
+%% API
+%% ===================================================================
+
+%% Called to collect Metric Families.
+-spec collect_mf(_Registry, Callback) -> ok when
+    _Registry :: prometheus_registry:registry(),
+    Callback :: prometheus_collector:callback().
+collect_mf(_Registry, Callback) ->
+    Metrics = metrics(),
+    [add_metric_family(Metric, Callback) || Metric <- Metrics],
+    ok.
+
+%% Called when the collector is deregistered.
+deregister_cleanup(_Registry) -> ok.
+
+%% ===================================================================
+%% Private functions
+%% ===================================================================
+
+add_metric_family({Name, Type, Help, Metrics}, Callback) ->
+    Callback(create_mf(?METRIC_NAME(Name), Help, Type, Metrics)).
+
+metrics() ->
+    AllInfo = arweave_limiter_sup:all_info(),
+    [
+        {ar_limiter_tracked_items_total, gauge,
+            "The number of tracked requests, timestamps, and leaky tokens", tracked_items(AllInfo)},
+        {ar_limiter_peers, gauge,
+            "The number of peers tracked per limiter and limiting type", peers(AllInfo)}
+    ].
+
+tracked_items(AllInfo) ->
+    lists:foldl(fun tracked_items_info/2, [], AllInfo).
+
+tracked_items_info({Id, Info}, Acc) ->
+    SlidingTimestamps = count_sliding_timestamps(Info),
+    Monitors = maps:get(concurrent_monitors, Info),
+    LeakyPeers = maps:get(leaky_tokens, Info),
+    Items = [
+        {[{limiter_id, Id}, {limiting_type, concurrency}], maps:size(Monitors)},
+        {[{limiter_id, Id}, {limiting_type, leaky_bucket_tokens}], maps:size(LeakyPeers)},
+        {[{limiter_id, Id}, {limiting_type, sliding_window_timestamps}], SlidingTimestamps}
+    ],
+    Items ++ Acc.
+
+count_sliding_timestamps(Info) ->
+    SlidingTimestamps = maps:get(sliding_timestamps, Info),
+    maps:fold(fun(_Peer, TimestampList, Acc) ->
+        length(TimestampList) + Acc
+    end, 0, SlidingTimestamps).
+
+peers(AllInfo) ->
+    lists:foldl(fun peers_info/2, [], AllInfo).
+
+peers_info({Id, Info}, Acc) ->
+    ConcurrentRequests = maps:get(concurrent_requests, Info),
+    LeakyPeers = maps:get(leaky_tokens, Info),
+    SlidingPeers = maps:get(sliding_timestamps, Info),
+    Items = [
+        {[{limiter_id, Id}, {limiting_type, concurrency}], maps:size(ConcurrentRequests)},
+        {[{limiter_id, Id}, {limiting_type, leaky_bucket_tokens}], maps:size(LeakyPeers)},
+        {[{limiter_id, Id}, {limiting_type, sliding_window_timestamps}], maps:size(SlidingPeers)}
+    ],
+    Items ++ Acc.
diff --git a/apps/arweave_limiter/src/arweave_limiter_sup.erl b/apps/arweave_limiter/src/arweave_limiter_sup.erl
new file mode 100644
index 0000000000..52c63289e7
--- /dev/null
+++ b/apps/arweave_limiter/src/arweave_limiter_sup.erl
@@ -0,0 +1,196 @@
+-module(arweave_limiter_sup).
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0, all_info/0]).
+
+-ifdef(AR_TEST).
+-export([start_link/1, child_spec/1, reset_all/0]).
+-endif.
+
+%% Supervisor callbacks
+-export([init/1]).
+
+-include_lib("arweave_config/include/arweave_config.hrl").
+-include_lib("arweave/include/ar_sup.hrl").
+
+-include_lib("kernel/include/logger.hrl").
+
+%% ===================================================================
+%% API functions
+%% ===================================================================
+start_link() ->
+    start_link(get_limiter_config()).
+
+start_link(Config) ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, [Config]).
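+
+%% Usage sketch: the metrics collector above reports through a prometheus
+%% registry; assuming no registration happens elsewhere, one way to hook it
+%% into the default registry would be
+%%
+%%   ok = prometheus_registry:register_collector(arweave_limiter_metrics_collector).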
+
+%% ===================================================================
+%% Supervisor callbacks
+%% ===================================================================
+init([Config]) ->
+    ok = arweave_limiter_metrics:register(),
+    {ok, {supervisor_spec(Config), children_spec(Config)}}.
+
+supervisor_spec(_Config) ->
+    #{ strategy => one_for_all,
+        intensity => 5,
+        period => 10 }.
+
+%%--------------------------------------------------------------------
+%% Child spec generation based on Config.
+%%--------------------------------------------------------------------
+children_spec(Configs) ->
+    [child_spec(Config) || Config <- Configs].
+
+child_spec(#{id := Id} = Config) ->
+    #{ id => Id,
+        start => {arweave_limiter_group, start_link, [Id, Config]},
+        type => worker,
+        shutdown => ?SHUTDOWN_TIMEOUT}.
+
+get_limiter_config() ->
+    {ok, Config} = arweave_config:get_env(),
+    [
+    #{id => chunk,
+        sliding_window_limit => Config#config.'http_api.limiter.chunk.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.chunk.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.chunk.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.chunk.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.chunk.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.chunk.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.chunk.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.chunk.is_manual_reduction_disabled'},
+
+    #{id => data_sync_record,
+        sliding_window_limit => Config#config.'http_api.limiter.data_sync_record.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.data_sync_record.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.data_sync_record.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.data_sync_record.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.data_sync_record.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.data_sync_record.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.data_sync_record.is_manual_reduction_disabled'},
+
+    #{id => recent_hash_list_diff,
+        sliding_window_limit => Config#config.'http_api.limiter.recent_hash_list_diff.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.recent_hash_list_diff.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.recent_hash_list_diff.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.recent_hash_list_diff.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.recent_hash_list_diff.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled'},
+
+    #{id => block_index,
+        sliding_window_limit => Config#config.'http_api.limiter.block_index.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.block_index.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.block_index.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.block_index.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.block_index.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.block_index.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.block_index.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.block_index.is_manual_reduction_disabled'},
+
+    #{id => wallet_list,
+        sliding_window_limit => Config#config.'http_api.limiter.wallet_list.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.wallet_list.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.wallet_list.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.wallet_list.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.wallet_list.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.wallet_list.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.wallet_list.is_manual_reduction_disabled'},
+
+    #{id => get_vdf,
+        sliding_window_limit => Config#config.'http_api.limiter.get_vdf.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.get_vdf.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.get_vdf.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.get_vdf.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.get_vdf.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.get_vdf.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.get_vdf.is_manual_reduction_disabled'},
+
+    #{id => get_vdf_session,
+        sliding_window_limit => Config#config.'http_api.limiter.get_vdf_session.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.get_vdf_session.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.get_vdf_session.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.get_vdf_session.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.get_vdf_session.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.get_vdf_session.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.get_vdf_session.is_manual_reduction_disabled'},
+
+    #{id => get_previous_vdf_session,
+        sliding_window_limit => Config#config.'http_api.limiter.get_previous_vdf_session.sliding_window_limit',
+        sliding_window_duration =>
+            Config#config.'http_api.limiter.get_previous_vdf_session.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.get_previous_vdf_session.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.get_previous_vdf_session.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.get_previous_vdf_session.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled'},
+
+    #{id => general,
+        sliding_window_limit => Config#config.'http_api.limiter.general.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.general.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.general.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.general.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.general.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.general.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.general.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.general.is_manual_reduction_disabled'},
+
+    #{id => metrics,
+        sliding_window_limit => Config#config.'http_api.limiter.metrics.sliding_window_limit',
+        sliding_window_duration => Config#config.'http_api.limiter.metrics.sliding_window_duration',
+        timestamp_cleanup_interval_ms =>
+            Config#config.'http_api.limiter.metrics.sliding_window_timestamp_cleanup_interval',
+        timestamp_cleanup_expiry =>
+            Config#config.'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry',
+        leaky_rate_limit => Config#config.'http_api.limiter.metrics.leaky_limit',
+        leaky_tick_interval_ms => Config#config.'http_api.limiter.metrics.leaky_tick_interval',
+        tick_reduction => Config#config.'http_api.limiter.metrics.leaky_tick_reduction',
+        concurrency_limit => Config#config.'http_api.limiter.metrics.concurrency_limit',
+        is_manual_reduction_disabled => Config#config.'http_api.limiter.metrics.is_manual_reduction_disabled'},
+    %% Local peers
+    #{id => local_peers,
+        no_limit => true}
+    ].
+
+all_info() ->
+    Children = supervisor:which_children(?MODULE),
+    [{Id, arweave_limiter_group:info(Id)} || {Id, _Child, _Type, _Modules} <- Children].
+
+reset_all() ->
+    Children = supervisor:which_children(?MODULE),
+    [{Id, arweave_limiter_group:reset_all(Id)} || {Id, _Child, _Type, _Modules} <- Children].
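+
+%% Illustrative sketch: wiring up an additional rate-limited route would
+%% mean appending one more entry to the list returned by
+%% get_limiter_config/0, e.g. a hypothetical `tx' group (values invented):
+%%
+%%   #{id => tx,
+%%     sliding_window_limit => 50,
+%%     sliding_window_duration => 1000,
+%%     timestamp_cleanup_interval_ms => 120000,
+%%     timestamp_cleanup_expiry => 120000,
+%%     leaky_rate_limit => 100,
+%%     leaky_tick_interval_ms => 1000,
+%%     tick_reduction => 20,
+%%     concurrency_limit => 100,
+%%     is_manual_reduction_disabled => false}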
diff --git a/apps/arweave_limiter/src/arweave_limiter_time.erl b/apps/arweave_limiter/src/arweave_limiter_time.erl
new file mode 100644
index 0000000000..34a68db7b8
--- /dev/null
+++ b/apps/arweave_limiter/src/arweave_limiter_time.erl
@@ -0,0 +1,17 @@
+%%%
+%%% @doc Rate limiter clock and time management library.
+%%%
+
+%%% NOTE: this module may seem redundant. However, wrapping
+%%% erlang:monotonic_time/1 here lets us test the production code without
+%%% alteration: the time-related functions can be mocked, so tests can
+%%% manipulate and control time precisely.
+
+-module(arweave_limiter_time).
+
+-export([
+    ts_now/0
+    ]).
+
+ts_now() ->
+    erlang:monotonic_time(millisecond).
diff --git a/apps/arweave_limiter/test/arweave_limiter_group_tests.erl b/apps/arweave_limiter/test/arweave_limiter_group_tests.erl
new file mode 100644
index 0000000000..0dfde44d21
--- /dev/null
+++ b/apps/arweave_limiter/test/arweave_limiter_group_tests.erl
@@ -0,0 +1,609 @@
+-module(arweave_limiter_group_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("arweave/include/ar.hrl").
+
+-define(M, arweave_limiter_group).
+-define(TABLE, eunit_arweave_limiter_tests_mock).
+-define(KEY, ts_now).
+-define(TEST_LIMITER, test_limiter).
+
+-define(setTsMock(Ts), ets:insert(?TABLE, {?KEY, Ts})).
+
+-define(assertHandlerRegisterOrRejectCall(LimiterRef, Pattern, Peer, Now),
+    ((fun () ->
+        ?assert(?setTsMock(Now)),
+        spawn_link(fun() ->
+            ?assertMatch(
+                Pattern,
+                ?M:register_or_reject_call(LimiterRef, Peer)),
+            receive
+                done -> ok
+            end
+        end)
+    end)())).
+
+expire_test() ->
+    IP = {1,2,3,4},
+    ?assertEqual([], ?M:expire_and_get_requests(IP, #{}, 1000, 1)),
+    ?assertEqual([1], ?M:drop_expired([1], 1000, 500)),
+    ?assertEqual([1], ?M:expire_and_get_requests(IP, #{IP => [1]}, 1000, 500)),
+    ?assertEqual([1, 500], ?M:expire_and_get_requests(IP, #{IP => [1, 500]}, 1000, 501)),
+    ?assertEqual([500, 501], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1100)),
+    ?assertEqual([500, 501], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1499)),
+    ?assertEqual([501], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1500)),
+    ?assertEqual([], ?M:expire_and_get_requests(IP, #{IP => [1, 500, 501]}, 1000, 1501)),
+    ok.
+
+add_and_order_test() ->
+    ?assertEqual([5], ?M:add_and_order_timestamps(5, [])),
+    ?assertEqual([1,2,3,4,5], ?M:add_and_order_timestamps(5, [1,2,3,4])),
+    ?assertEqual([1,2,3,4,5,6,7], ?M:add_and_order_timestamps(5, [1,2,3,4,6,7])),
+    ?assertEqual([5,7,8], ?M:add_and_order_timestamps(5, [7,8])),
+    ok.
+
+cleanup_timestamps_map_test() ->
+    IP1 = {1,2,3,4},
+    IP2 = {2,3,4,5},
+    ?assertEqual(
+        #{IP1 => [1],
+            IP2 => [500]
+        }, ?M:cleanup_expired_sliding_peers(
+            #{IP1 => [1],
+                IP2 => [500]}, 1000, 501)),
+    ?assertEqual(
+        #{%%IP1 => [1], - removed
+            IP2 => [500]
+        }, ?M:cleanup_expired_sliding_peers(
+            #{IP1 => [1],
+                IP2 => [500]}, 1000, 1100)),
+    Empty = ?M:cleanup_expired_sliding_peers(
+        #{IP1 => [1],
+            IP2 => [500]}, 1000, 2100),
+    %% Now it's empty
+    ?assertEqual(0, maps:size(Empty)),
+    ok.
+ +setup(Config) -> + ?TABLE = ets:new(?TABLE, [named_table, public]), + ?setTsMock(0), + {module, arweave_limiter_time} = code:ensure_loaded(arweave_limiter_time), + + ok = meck:new(prometheus_counter, [passthrough]), + ok = meck:expect(prometheus_counter, inc, 2, ok), + ok = meck:expect(prometheus_counter, inc, 3, ok), + + ok = meck:new(arweave_limiter_time, []), + ok = meck:expect(arweave_limiter_time, ts_now, + fun() -> + [{?KEY, Value}] = ets:lookup(?TABLE, ?KEY), + Value + end), + 0 = arweave_limiter_time:ts_now(), + {ok, LimiterPid} = ?M:start_link(?TEST_LIMITER, Config), + LimiterPid. + +cleanup(_Config, _LimiterPid) -> + true = meck:validate(arweave_limiter_time), + true = meck:validate(prometheus_counter), + ok = meck:unload([prometheus_counter, arweave_limiter_time]), + ?M:stop(?TEST_LIMITER), + true = ets:delete(?TABLE), + ok. + +rate_limiter_process_test_() -> + {foreachx, + fun setup/1, + fun cleanup/2, + [{#{id => ?TEST_LIMITER, + tick_reduction => 1, + leaky_rate_limit => 0, + concurrency_limit => 5, + sliding_window_limit => 2, + sliding_window_duration => 1000, + timestamp_cleanup_expiry => 1000, + leaky_tick_interval_ms => 100000}, + fun(_Config, _LimiterPid) -> {"sliding test", fun simple_sliding_happy/0} end}, + {#{id => ?TEST_LIMITER, + tick_reduction => 1, + leaky_rate_limit => 5, + concurrency_limit => 2, + sliding_window_limit => 0, + sliding_window_duration => 1000, + timestamp_cleanup_expiry => 1000, + leaky_tick_interval_ms => 100000}, + fun simple_leaky_happy_path/2}, + {#{id => ?TEST_LIMITER, + tick_reduction => 1, + leaky_rate_limit=> 5, + concurrency_limit => 2, + sliding_window_limit => 0, + sliding_window_duration => 1000, + timestamp_cleanup_expiry => 1000, + leaky_tick_interval_ms => 100000}, + fun rate_limiter_rejected_due_concurrency/2}, + {#{id => ?TEST_LIMITER, + tick_reduction => 1, + leaky_rate_limit => 2, + concurrency_limit => 5, + sliding_window_limit => 0, + sliding_window_duration => 1000, + timestamp_cleanup_expiry => 1000, + leaky_tick_interval_ms => 100000}, + fun rejected_due_leaky_rate/2}, + {#{id => ?TEST_LIMITER, + tick_reduction => 1, + leaky_rate_limit => 1, + concurrency_limit => 10, + sliding_window_limit => 1, + sliding_window_duration => 100000, + leaky_tick_interval_ms => 10000000, + timestamp_cleanup_expiry => 1000, + timestamp_cleanup_interval_ms => 1000000}, + fun both_exhausted/2}, + {#{id => ?TEST_LIMITER, + tick_reduction => 1, + leaky_rate_limit => 1, + concurrency_limit => 2, + sliding_window_limit => 1, + sliding_window_duration => 1000, + timestamp_cleanup_expiry => 1000, + leaky_tick_interval_ms => 100000}, + fun peer_cleanup/2}, + {#{id => ?TEST_LIMITER, + tick_reduction => 1, + leaky_rate_limit => 5, + concurrency_limit => 10, + sliding_window_limit => 0, + sliding_window_duration => 1000, + timestamp_cleanup_expiry => 1000, + leaky_tick_interval_ms => 100000}, + fun leaky_manual_reduction/2}, + {#{id => ?TEST_LIMITER, + is_manual_reduction_disabled => true, + tick_reduction => 1, + leaky_rate_limit => 5, + concurrency_limit => 10, + sliding_window_limit => 0, + sliding_window_duration => 1000, + timestamp_cleanup_expiry => 1000, + leaky_tick_interval_ms => 100000}, + fun leaky_manual_reduction_disabled/2} + ]}. + +simple_sliding_happy() -> + IP = {1,2,3,4}, + + Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 1), + Caller1 ! 
done,
+
+    timer:sleep(100),
+    Info1 = ?M:info(?TEST_LIMITER),
+    ?assertMatch(#{sliding_timestamps := #{IP := [1]}}, Info1),
+    #{concurrent_requests := ConcurrentReqs1} = Info1,
+    ?assertEqual(0, maps:size(ConcurrentReqs1)),
+
+    Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 500),
+    Caller2 ! done,
+
+    timer:sleep(100),
+    Info2 = ?M:info(?TEST_LIMITER),
+    ?assertMatch(#{sliding_timestamps := #{IP := [1,500]}}, Info2),
+    #{concurrent_requests := ConcurrentReqs2} = Info2,
+    ?assertEqual(0, maps:size(ConcurrentReqs2)),
+
+    Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 2000),
+    Caller3 ! done,
+    timer:sleep(100),
+    %% 2 previous ts expired due to the time elapsed.
+    ?assertMatch(#{sliding_timestamps := #{IP := [2000]}}, ?M:info(?TEST_LIMITER)),
+
+    Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 2001),
+    Caller4 ! done,
+    timer:sleep(100),
+    ?assertMatch(#{sliding_timestamps := #{IP := [2000, 2001]}}, ?M:info(?TEST_LIMITER)),
+
+    Caller5 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {reject, rate_limit, _}, IP, 2002),
+    Caller5 ! done,
+    timer:sleep(100),
+    %% Wait a bit to make sure the request was processed, and observe that
+    %% there is no new timestamp.
+    ?assertMatch(#{sliding_timestamps := #{IP := [2000, 2001]}}, ?M:info(?TEST_LIMITER)),
+    ok.
+
+simple_leaky_happy_path(_Config, LimiterPid) ->
+    {"Leaky happy path",
+    fun() ->
+        ?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
+        IP = {1,2,3,4},
+        %% init state, the ip is not blocked
+        Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 0),
+        timer:sleep(20),
+        Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 2),
+
+        %% wait a bit so they are surely started.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+            leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+        Caller1 ! done,
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{IP := [_]},
+            leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+        Caller2 ! done,
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% Keys deleted
+        ?assertMatch(#{concurrent_requests := #{},
+            leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+        %% manually trigger a tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{},
+            leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
+
+        %% manually trigger a tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{},
+            leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
+
+        %% manually trigger a tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        %% Key only deleted from leaky_tokens map, when it reached 0 in the previous tick
+        #{concurrent_requests := ConcurrentReqs,
+            leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
+        ?assertEqual(0, maps:size(ConcurrentReqs)),
+        ?assertMatch(0, maps:size(LeakyTokens)),
+        ok
+    end}.
+
+rate_limiter_rejected_due_concurrency(_Config, LimiterPid) ->
+    {"rejected due to concurrency",
+     fun() ->
+        ?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
+        %% init state, the ip is not blocked
+        IP = {1,2,3,4},
+
+        Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, -1),
+        timer:sleep(120),
+        Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 10),
+        timer:sleep(120),
+        Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {reject, concurrency, _Data}, IP, 10),
+
+        %% wait a bit so they are surely started.
+        timer:sleep(100),
+
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+                       leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+
+        Caller1 ! done,
+        Caller2 ! done,
+        Caller3 ! done,
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% Keys deleted
+        %% NOTE: concurrent_requests := #{} matches any map, so we don't know what's in there.
+        ?assertMatch(#{concurrent_requests := #{},
+                       leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+        %% manually trigger a tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{},
+                       leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
+
+        %% Concurrency reduced, one handler terminated, will register again
+        Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 0),
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        Caller4 ! done,
+        %% Keys deleted
+        ?assertMatch(#{concurrent_requests := #{},
+                       leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+
+        %% manually trigger two ticks.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{},
+                       leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
+
+        %% manually trigger a tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        %% The key is only deleted from the leaky_tokens map once it reached 0 in the previous tick.
+        #{concurrent_requests := ConcurrentReqs,
+          leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
+        ?assertEqual(0, maps:size(ConcurrentReqs)),
+        ?assertEqual(0, maps:size(LeakyTokens)),
+        ok
+    end}.
+
+rejected_due_leaky_rate(_Config, LimiterPid) ->
+    {"rejected due to leaky rate",
+     fun() ->
+        ?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
+        %% init state, the ip is not blocked
+        IP = {1,2,3,4},
+
+        Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 1),
+        timer:sleep(20),
+        Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 2),
+        timer:sleep(20),
+        Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {reject, rate_limit, _Data}, IP, 3),
+
+        %% wait a bit so they are surely started.
+        timer:sleep(100),
+        %% 2 concurrent, 2 tokens
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+                       leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+        %% Simulate a tick
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% 2 concurrent, but tokens reduced.
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+                       leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
+
+        %% Tokens reduced, will register again
+        Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 10),
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% 3 concurrent, 2 tokens
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_,_]},
+                       leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+        %% manually trigger two ticks.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        LimiterPid ! {tick, leaky_bucket_reduction},
+
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_,_]},
+                       leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
+
+        %% Clean up
+        Caller1 ! done,
+        Caller2 ! done,
+        Caller3 ! done,
+        Caller4 ! done,
+
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% The key is only deleted from the leaky_tokens map once it reached 0 in the previous tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        #{concurrent_requests := ConcurrentReqs,
+          leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
+        ?assertEqual(0, maps:size(ConcurrentReqs)),
+        ?assertEqual(0, maps:size(LeakyTokens)),
+        ok
+    end}.
+
+both_exhausted(_Config, LimiterPid) ->
+    {"Both exhausted",
+     fun() ->
+        ?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
+        IP = {1,2,3,4},
+
+        Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, -1),
+
+        %% wait a bit so they are surely started.
+        timer:sleep(100),
+        %% 1 concurrent request, 1 sliding timestamp, no leaky tokens.
+        ?assertMatch(#{concurrent_requests := #{IP := [_]},
+                       sliding_timestamps := #{IP := [_]},
+                       leaky_tokens := #{}}, ?M:info(?TEST_LIMITER)),
+
+        Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
+
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% 2 concurrent; the leaky register recorded 1 token.
+        Info = ?M:info(?TEST_LIMITER),
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+                       sliding_timestamps := #{IP := [_]},
+                       leaky_tokens := #{IP := 1}}, Info),
+
+        Caller3 = ?assertHandlerRegisterOrRejectCall(
+            ?TEST_LIMITER, {reject, rate_limit, _Data}, IP, 130),
+
+        %% The rejected call must not change the tracked state.
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% 2 concurrent, 1 token
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+                       sliding_timestamps := #{IP := [_]},
+                       leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
+
+        %% Clean up
+        Caller1 ! done,
+        Caller2 ! done,
+        Caller3 ! done,
+
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% The key is only deleted from the leaky_tokens map once it reached 0 in the previous tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+
+        %% wait a tiny bit so the tick logic surely runs.
+        timer:sleep(100),
+        #{concurrent_requests := ConcurrentReqs,
+          leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
+        ?assertEqual(0, maps:size(ConcurrentReqs)),
+        ?assertEqual(0, maps:size(LeakyTokens)),
+
+        ok
+    end}.
+
+peer_cleanup(_Config, LimiterPid) ->
+    {"Peer cleanup",
+     fun() ->
+        ?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
+        %% init state, the ip is not blocked
+        IP = {1,2,3,4},
+
+        Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, sliding}, IP, 1),
+
+        %% wait a bit so they are surely started.
+        timer:sleep(100),
+        %% 1 concurrent request, 1 sliding timestamp, no leaky tokens.
+        ?assertMatch(#{concurrent_requests := #{IP := [_]},
+                       sliding_timestamps := #{IP := [_]},
+                       leaky_tokens := #{}}, ?M:info(?TEST_LIMITER)),
+
+        Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
+
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% 2 concurrent; the leaky register recorded 1 token.
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+                       sliding_timestamps := #{IP := [_]},
+                       leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
+
+        %% further requests are rejected
+        Caller3 = ?assertHandlerRegisterOrRejectCall(
+            ?TEST_LIMITER, {reject, concurrency, _Data}, IP, 300),
+
+        %% The rejected call must not change the tracked state.
+        %% wait a tiny bit so the logic surely runs.
+        timer:sleep(100),
+        %% 2 concurrent, 1 token
+        ?assertMatch(#{concurrent_requests := #{IP := [_,_]},
+                       sliding_timestamps := #{IP := [_]},
+                       leaky_tokens := #{IP := 1}}, ?M:info(?TEST_LIMITER)),
+
+        %% Clean up
+        Caller1 ! done,
+        Caller2 ! done,
+        Caller3 ! done,
+        LimiterPid ! {tick, leaky_bucket_reduction},
+        %% The key is only deleted from the leaky_tokens map once it reached 0 in the previous tick.
+        LimiterPid ! {tick, leaky_bucket_reduction},
+
+        %% wait a tiny bit so the tick logic surely runs.
+        %% Now we still have timestamps for IP in the state.
+        timer:sleep(100),
+        #{concurrent_requests := ConcurrentReqs,
+          sliding_timestamps := SlidingTimestamps,
+          leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
+        ?assertEqual(0, maps:size(ConcurrentReqs)),
+        ?assertEqual(1, maps:size(SlidingTimestamps)),
+        ?assertEqual(0, maps:size(LeakyTokens)),
+
+        ?setTsMock(20000),
+
+        timer:sleep(500),
+        %% Trigger timestamp cleanup.
+        LimiterPid ! {tick, sliding_window_timestamp_cleanup},
+
+        %% wait a tiny bit so the tick logic surely runs.
+        %% Now everything should be cleaned up.
+        timer:sleep(100),
+        #{concurrent_requests := ConcurrentReqs,
+          sliding_timestamps := SlidingTimestamps2,
+          leaky_tokens := LeakyTokens} = ?M:info(?TEST_LIMITER),
+        ?assertEqual(0, maps:size(ConcurrentReqs)),
+        ?assertEqual(0, maps:size(SlidingTimestamps2)),
+        ?assertEqual(0, maps:size(LeakyTokens)),
+
+        ok
+    end}.
+
+leaky_manual_reduction(_Config, _LimiterPid) ->
+    {"Leaky tokens manual peer reduction",
+     fun() ->
+        ?assertMatch(#{is_manual_reduction_disabled := false}, ?M:config(?TEST_LIMITER)),
+        %% init state, the ip is not blocked
+        IP = {1,2,3,4},
+        NonRecordedIP = {2,3,4,5,1984},
+
+        Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 1),
+        Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
+        Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 40),
+        Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 60),
+        %% wait a bit so they are surely started.
+        timer:sleep(100),
+        %% 4 concurrent requests, 4 leaky tokens.
+        ?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
+                       leaky_tokens := #{IP := 4}}, ?M:info(?TEST_LIMITER)),
+
+        ?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
+        ?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
+
+        %% call for one that's surely not in the state
+        ?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, NonRecordedIP)),
+
+        %% 4 concurrent; tokens manually reduced to 2.
+        ?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
+                       leaky_tokens := #{IP := 2}}, ?M:info(?TEST_LIMITER)),
+
+        ?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
+        ?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
+
+        %% 4 concurrent; tokens manually reduced to 0.
+        ?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
+                       leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
+
+        ?assertEqual(ok, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
+
+        %% 4 concurrent, no change, there is nothing to reduce beyond 0
+        ?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
+                       leaky_tokens := #{IP := 0}}, ?M:info(?TEST_LIMITER)),
+
+        %% Clean up
+        Caller1 ! done,
+        Caller2 ! done,
+        Caller3 ! done,
+        Caller4 ! done,
+
+        ok
+    end}.
+
+leaky_manual_reduction_disabled(_Config, _LimiterPid) ->
+    {"Leaky tokens manual peer reduction disabled",
+     fun() ->
+        ?assertMatch(#{is_manual_reduction_disabled := true}, ?M:config(?TEST_LIMITER)),
+        %% init state, the ip is not blocked
+        IP = {1,2,3,4},
+
+        Caller1 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 1),
+        Caller2 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 20),
+        Caller3 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 40),
+        Caller4 = ?assertHandlerRegisterOrRejectCall(?TEST_LIMITER, {register, leaky}, IP, 60),
+        %% wait a bit so they are surely started.
+        timer:sleep(100),
+        ?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
+                       leaky_tokens := #{IP := 4}}, ?M:info(?TEST_LIMITER)),
+
+        ?assertEqual(disabled, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
+
+        %% Didn't reduce anything
+        ?assertMatch(#{concurrent_requests := #{IP := [_, _, _, _]},
+                       leaky_tokens := #{IP := 4}}, ?M:info(?TEST_LIMITER)),
+
+        %% We can repeat this; it is still disabled.
+        ?assertEqual(disabled, ?M:reduce_for_peer(?TEST_LIMITER, IP)),
+
+        %% Clean up
+        Caller1 ! done,
+        Caller2 ! done,
+        Caller3 ! done,
+        Caller4 ! done,
+
+        ok
+    end}.
diff --git a/apps/arweave_limiter/test/arweave_limiter_metrics_collector_tests.erl b/apps/arweave_limiter/test/arweave_limiter_metrics_collector_tests.erl
new file mode 100644
index 0000000000..06545ec123
--- /dev/null
+++ b/apps/arweave_limiter/test/arweave_limiter_metrics_collector_tests.erl
@@ -0,0 +1,118 @@
+-module(arweave_limiter_metrics_collector_tests).
+
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("arweave/include/ar.hrl").
+
+-define(M, arweave_limiter_metrics_collector).
+-define(S, arweave_limiter_sup).
+-define(L, arweave_limiter).
+-define(ME, arweave_limiter_metrics).
+
+-define(GENERAL, general_test).
+-define(METRICS, metrics_test).
+
+%% Very similar, but not identical, to the ar_limiter_tests macro.
+-define(assertHandlerRegisterOrRejectCall(LimiterRef, Pattern, Peer),
+    ((fun () ->
+        spawn_link(fun() ->
+            ?assertMatch(
+                Pattern,
+                ?L:register_or_reject_call(LimiterRef, Peer)),
+            receive
+                done -> ok
+            end
+        end)
+    end)())).
+
+do_setup() ->
+    %% It would be tempting to just use what the node has started already,
+    %% but we need to start new limiters to control the config, and make
+    %% sure these tests don't break when only the config changes.
+    %% It is especially important to increase the interval for the tests.
+ Configs = [#{id => ?GENERAL, + leaky_rate_limit => 50, + concurrency_limit => 150, + sliding_window_limit => 100, + leaky_tick_interval_ms => 1000000}, + #{id => ?METRICS, + leaky_rate_limit => 50, + concurrency_limit => 150, + sliding_window_limit => 100, + leaky_tick_interval_ms => 1000000} + ], + LimiterIds = + lists:map(fun(Config) -> + {ok, _LimPid} = supervisor:start_child(?S, ?S:child_spec(Config)), + maps:get(id, Config) + end, Configs), + {LimiterIds, []}. + +do_setup_with_data() -> + {LimiterIds, _Callers} = do_setup(), + %% Generate IP tuples (up to like 16k peers), but any term can be a peer ID. + Port = 1984, + IPs = [{1,2,X div 128, X rem 128, Port} || X <- lists:seq(1, 1000)], + + Callers = lists:foldl(fun(IP, Acc) -> + Acc ++ [?assertHandlerRegisterOrRejectCall(?GENERAL, {register, _}, IP) || + _ <- lists:seq(1,150)] + end, [], IPs), + timer:sleep(500), + + {LimiterIds, Callers}. + +cleanup({LimiterIds, Callers}) -> + [Caller ! done || Caller <- Callers], + timer:sleep(150), + ok = lists:foreach(fun(Id) -> + supervisor:terminate_child(?S, Id), + supervisor:delete_child(?S, Id), + ?debugFmt(">>> Terminated and deleted limiter: ~p ~n", [Id]) + end, LimiterIds), + ok. + +empty_limiters_sanity_check_test_() -> + { + setup, + fun do_setup/0, + fun cleanup/1, + fun({_Sup, _Callers}) -> + [fun() -> + ?assertMatch( + [{ar_limiter_tracked_items_total,gauge, + "tracked requests, timestamps, leaky tokens", + _}, + {ar_limiter_peers,gauge,[],_}], ?M:metrics()) + end] + end + }. + + +rate_limiter_happy_path_sanity_check_test_() -> + { + setup, + fun do_setup_with_data/0, + fun cleanup/1, + fun({_Sup, _Callers}) -> + [fun() -> + ?assertMatch( + [{ar_limiter_tracked_items_total,gauge, + "tracked requests, timestamps, leaky tokens", + _}, + {ar_limiter_peers,gauge,[],_}], ?M:metrics()), + + Info = arweave_limiter_group:info(?GENERAL), + ?assertMatch( + [ + {[{limiter_id, ?GENERAL}, {limiting_type, concurrency}], 150*1000}, + {[{limiter_id, ?GENERAL}, {limiting_type, leaky_bucket_tokens}], 1000}, + {[{limiter_id, ?GENERAL}, {limiting_type, sliding_window_timestamps}], 100*1000} + ], ?M:tracked_items([{?GENERAL, Info}])), + ?assertMatch( + [ + {[{limiter_id, ?GENERAL}, {limiting_type, concurrency}], 1000}, + {[{limiter_id, ?GENERAL}, {limiting_type, leaky_bucket_tokens}], 1000}, + {[{limiter_id, ?GENERAL}, {limiting_type, sliding_window_timestamps}], 1000} + ], ?M:peers([{?GENERAL, Info}])) + end] + end}. diff --git a/bin/arweave b/bin/arweave index cecfe99f24..b8c4d4ecae 100755 --- a/bin/arweave +++ b/bin/arweave @@ -564,9 +564,19 @@ arweave_test_run() { TEST_NODE="${TEST_NODE_NAME}@${TEST_NODE_HOST}" fi - TEST_PATH="$(./rebar3 as ${TEST_PROFILE} path)" - TEST_PATH_BASE="$(./rebar3 as ${TEST_PROFILE} path --base)/lib/arweave/test" - PARAMS="-pa ${TEST_PATH} ${TEST_PATH_BASE} -config ${TEST_CONFIG} -noshell" + TEST_PATH="$(./rebar3 as ${TEST_PROFILE} path)" + + ## TODO: Generate path for all apps -> Should we fetch this from somewhere? 
+ APPS="arweave arweave_config arweave_limiter arweave_diagnostic" + + PATH_ARGS="" + for app in $APPS; do + P="$(./rebar3 as ${TEST_PROFILE} path --base)/lib/${app}/test" + echo $P + PATH_ARGS="${PATH_ARGS} ${P}" + done + + PARAMS="-pa ${TEST_PATH} ${PATH_ARGS} -config ${TEST_CONFIG} -noshell" ENTRY_POINT="-run ar ${TEST_MODULE} ${*} -s init stop" command="erl ${PARAMS} -name ${TEST_NODE} -setcookie ${TEST_COOKIE} ${ENTRY_POINT}" echo -e "\033[0;32m===> Execute command ${command}\033[0m" diff --git a/rebar.config b/rebar.config index 51ffe3aeb5..2875f2e4d2 100644 --- a/rebar.config +++ b/rebar.config @@ -42,6 +42,7 @@ {relx, [ {release, {arweave, "2.9.5"}, [ arweave_config, + {arweave_limiter, load}, {arweave_diagnostic, load}, {arweave, load}, {recon, load}, @@ -433,7 +434,7 @@ ]} ]}, {test, [ - {deps, [{meck, "0.8.13"}]}, + {deps, [{meck, "1.1.0"}]}, {erl_opts, [ {d, 'DEBUG', debug}, {d, 'FORKS_RESET', true}, @@ -528,6 +529,7 @@ {relx, [ {release, {arweave, "2.9.5"}, [ arweave_config, + arweave_limiter, {arweave_diagnostic, load}, {arweave, load}, {recon, load}, From 6b3377b85e665a37e245996a56fe7945035db1d3 Mon Sep 17 00:00:00 2001 From: Kristof Hetzl Date: Mon, 19 Jan 2026 19:45:20 +0000 Subject: [PATCH 2/7] fix tests again correct tick reduction config parameter further alignment of config params --- apps/arweave/test/.#ar_test_node.erl | 1 + apps/arweave/test/ar_test_node.erl | 4 ++-- apps/arweave/test/ar_test_runner.erl | 1 + apps/arweave_limiter/src/arweave_limiter.erl | 4 ++-- .../src/arweave_limiter_group.erl | 12 +++++------ .../src/arweave_limiter_sup.erl | 20 +++++++++---------- .../test/arweave_limiter_group_tests.erl | 18 ++++++++--------- 7 files changed, 30 insertions(+), 30 deletions(-) create mode 120000 apps/arweave/test/.#ar_test_node.erl diff --git a/apps/arweave/test/.#ar_test_node.erl b/apps/arweave/test/.#ar_test_node.erl new file mode 120000 index 0000000000..614a2c6f09 --- /dev/null +++ b/apps/arweave/test/.#ar_test_node.erl @@ -0,0 +1 @@ +kristofhetzl@Kristofs-MacBook-Pro-2.local.1386 \ No newline at end of file diff --git a/apps/arweave/test/ar_test_node.erl b/apps/arweave/test/ar_test_node.erl index 5ea1c119d9..eaa71b7090 100644 --- a/apps/arweave/test/ar_test_node.erl +++ b/apps/arweave/test/ar_test_node.erl @@ -304,7 +304,7 @@ start_node(B0, Config, WaitUntilSync) -> clean_up_and_stop(), prometheus:start(), arweave_config:start(), - arweave_limiter:start(), + ok = arweave_limiter:start(), {ok, BaseConfig} = arweave_config:get_env(), write_genesis_files(BaseConfig#config.data_dir, B0), update_config(Config), @@ -660,7 +660,7 @@ start(B0, RewardAddr, Config, StorageModules) -> clean_up_and_stop(), prometheus:start(), arweave_config:start(), - arweave_limiter:start(), + ok = arweave_limiter:start(), write_genesis_files(Config#config.data_dir, B0), ok = arweave_config:set_env(Config#config{ start_from_latest_state = true, diff --git a/apps/arweave/test/ar_test_runner.erl b/apps/arweave/test/ar_test_runner.erl index 77515355f8..5177655939 100644 --- a/apps/arweave/test/ar_test_runner.erl +++ b/apps/arweave/test/ar_test_runner.erl @@ -107,6 +107,7 @@ run_tests(TestType, TestSpec) -> ensure_started(TestType) -> try arweave_config:start(), + ok = arweave_limiter:start(), start_for_tests(TestType), ar_test_node:boot_peers(TestType), ar_test_node:wait_for_peers(TestType) diff --git a/apps/arweave_limiter/src/arweave_limiter.erl b/apps/arweave_limiter/src/arweave_limiter.erl index 415f29ce97..a1c0e00965 100644 --- a/apps/arweave_limiter/src/arweave_limiter.erl +++ 
b/apps/arweave_limiter/src/arweave_limiter.erl @@ -31,7 +31,7 @@ %%-------------------------------------------------------------------- -%% @doc helper function to start `arweave_config' application. +%% @doc helper function to start `arweave_limiter' application. %% @end %%-------------------------------------------------------------------- -spec start() -> ok | {error, term()}. @@ -39,7 +39,7 @@ start() -> case application:ensure_all_started(?MODULE, permanent) of {ok, Dependencies} -> - ?LOG_DEBUG("arweave_limiter started dependencies: ~p", Dependencies), + ?LOG_DEBUG("arweave_limiter started dependencies: ~p", [Dependencies]), ok; Elsewise -> Elsewise diff --git a/apps/arweave_limiter/src/arweave_limiter_group.erl b/apps/arweave_limiter/src/arweave_limiter_group.erl index 6bd7173cad..3e5fa50747 100644 --- a/apps/arweave_limiter/src/arweave_limiter_group.erl +++ b/apps/arweave_limiter/src/arweave_limiter_group.erl @@ -37,6 +37,7 @@ cleanup_expired_sliding_peers/3]). -endif. + -include_lib("arweave/include/ar.hrl"). -include_lib("arweave_config/include/arweave_config.hrl"). @@ -89,8 +90,8 @@ init([Config] = _Args) -> IsDisabled = maps:get(no_limit, Config, false), IsManualReductionDisabled = maps:get(is_manual_reduction_disabled, Config, false), - LeakyTickMs = maps:get(leaky_tick_interval_ms, Config, ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL), - TimestampCleanupTickMs = maps:get(timestamp_cleanup_interval_ms, Config, + LeakyTickMs = maps:get(leaky_tick_ms, Config, ?DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL), + TimestampCleanupTickMs = maps:get(timestamp_cleanup_tick_ms, Config, ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_INTERVAL), TimestampCleanupExpiry = maps:get(timestamp_cleanup_expiry, Config, ?DEFAULT_HTTP_API_LIMITER_TIMESTAMP_CLEANUP_EXPIRY), @@ -253,7 +254,8 @@ handle_info(Info, State = #{id := Id}) -> ?LOG_WARNING([{event, unhandled_info}, {id, Id}, {module, ?MODULE}, {info, Info}]), {noreply, State}. -terminate(_Reason, _State) -> +terminate(_Reason, #{leaky_tick_timer_ref := _LeakyRef, + timestamp_cleanup_timer_ref := _TsRef} = _State) -> ok. 
code_change(_OldVsn, State, _Extra) -> @@ -354,8 +356,6 @@ remove_concurrent(MonitorRef, _Pid, _Reason, ConcurrentRequests, ConcurrentMonit filter_state_for_config(#{id := Id, is_disabled := IsDisabled, is_manual_reduction_disabled := IsManualReductionDisabled, - leaky_tick_timer_ref := LeakyRef, - timestamp_cleanup_timer_ref := TsRef, leaky_tick_ms := LeakyTickMs, timestamp_cleanup_tick_ms := TimestampCleanupTickMs, timestamp_cleanup_expiry := TimestampCleanupExpiry, @@ -367,8 +367,6 @@ filter_state_for_config(#{id := Id, #{id => Id, is_disabled => IsDisabled, is_manual_reduction_disabled => IsManualReductionDisabled, - leaky_tick_timer_ref => LeakyRef, - timestamp_cleanup_timer_ref => TsRef, leaky_tick_ms => LeakyTickMs, timestamp_cleanup_tick_ms => TimestampCleanupTickMs, timestamp_cleanup_expiry => TimestampCleanupExpiry, diff --git a/apps/arweave_limiter/src/arweave_limiter_sup.erl b/apps/arweave_limiter/src/arweave_limiter_sup.erl index 52c63289e7..898c153323 100644 --- a/apps/arweave_limiter/src/arweave_limiter_sup.erl +++ b/apps/arweave_limiter/src/arweave_limiter_sup.erl @@ -61,7 +61,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.chunk.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.chunk.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.chunk.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.chunk.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.chunk.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.chunk.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.chunk.is_manual_reduction_disabled'}, @@ -74,7 +74,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.data_sync_record.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.data_sync_record.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.data_sync_record.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.data_sync_record.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.data_sync_record.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.data_sync_record.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.data_sync_record.is_manual_reduction_disabled'}, @@ -87,7 +87,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.recent_hash_list_diff.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.recent_hash_list_diff.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.recent_hash_list_diff.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.recent_hash_list_diff.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.recent_hash_list_diff.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.recent_hash_list_diff.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.recent_hash_list_diff.is_manual_reduction_disabled'}, @@ -100,7 +100,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.block_index.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.block_index.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.block_index.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.block_index.leaky_tick_interval', + tick_reduction => 
Config#config.'http_api.limiter.block_index.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.block_index.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.block_index.is_manual_reduction_disabled'}, @@ -113,7 +113,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.wallet_list.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.wallet_list.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.wallet_list.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.wallet_list.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.wallet_list.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.wallet_list.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.wallet_list.is_manual_reduction_disabled'}, @@ -126,7 +126,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.get_vdf.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.get_vdf.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.get_vdf.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.get_vdf.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.get_vdf.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.get_vdf.is_manual_reduction_disabled'}, @@ -139,7 +139,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.get_vdf_session.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.get_vdf_session.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.get_vdf_session.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.get_vdf_session.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.get_vdf_session.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.get_vdf_session.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.get_vdf_session.is_manual_reduction_disabled'}, @@ -153,7 +153,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.get_previous_vdf_session.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.get_previous_vdf_session.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.get_previous_vdf_session.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.get_previous_vdf_session.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.get_previous_vdf_session.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.get_previous_vdf_session.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.get_previous_vdf_session.is_manual_reduction_disabled'}, @@ -166,7 +166,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.general.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.general.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.general.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.general.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.general.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.general.concurrency_limit', is_manual_reduction_disabled => 
Config#config.'http_api.limiter.general.is_manual_reduction_disabled'}, @@ -179,7 +179,7 @@ get_limiter_config() -> Config#config.'http_api.limiter.metrics.sliding_window_timestamp_cleanup_expiry', leaky_rate_limit => Config#config.'http_api.limiter.metrics.leaky_limit', leaky_tick_ms => Config#config.'http_api.limiter.metrics.leaky_tick_interval', - tick_reduction => Config#config.'http_api.limiter.metrics.leaky_tick_interval', + tick_reduction => Config#config.'http_api.limiter.metrics.leaky_tick_reduction', concurrency_limit => Config#config.'http_api.limiter.metrics.concurrency_limit', is_manual_reduction_disabled => Config#config.'http_api.limiter.metrics.is_manual_reduction_disabled'}, %% Local peers diff --git a/apps/arweave_limiter/test/arweave_limiter_group_tests.erl b/apps/arweave_limiter/test/arweave_limiter_group_tests.erl index 0dfde44d21..6a63939df3 100644 --- a/apps/arweave_limiter/test/arweave_limiter_group_tests.erl +++ b/apps/arweave_limiter/test/arweave_limiter_group_tests.erl @@ -102,7 +102,7 @@ rate_limiter_process_test_() -> sliding_window_limit => 2, sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000, - leaky_tick_interval_ms => 100000}, + leaky_tick_ms => 100000}, fun(_Config, _LimiterPid) -> {"sliding test", fun simple_sliding_happy/0} end}, {#{id => ?TEST_LIMITER, tick_reduction => 1, @@ -111,7 +111,7 @@ rate_limiter_process_test_() -> sliding_window_limit => 0, sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000, - leaky_tick_interval_ms => 100000}, + leaky_tick_ms => 100000}, fun simple_leaky_happy_path/2}, {#{id => ?TEST_LIMITER, tick_reduction => 1, @@ -120,7 +120,7 @@ rate_limiter_process_test_() -> sliding_window_limit => 0, sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000, - leaky_tick_interval_ms => 100000}, + leaky_tick_ms => 100000}, fun rate_limiter_rejected_due_concurrency/2}, {#{id => ?TEST_LIMITER, tick_reduction => 1, @@ -129,7 +129,7 @@ rate_limiter_process_test_() -> sliding_window_limit => 0, sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000, - leaky_tick_interval_ms => 100000}, + leaky_tick_ms => 100000}, fun rejected_due_leaky_rate/2}, {#{id => ?TEST_LIMITER, tick_reduction => 1, @@ -137,9 +137,9 @@ rate_limiter_process_test_() -> concurrency_limit => 10, sliding_window_limit => 1, sliding_window_duration => 100000, - leaky_tick_interval_ms => 10000000, + leaky_tick_ms => 10000000, timestamp_cleanup_expiry => 1000, - timestamp_cleanup_interval_ms => 1000000}, + timestamp_cleanup_tick_ms => 1000000}, fun both_exhausted/2}, {#{id => ?TEST_LIMITER, tick_reduction => 1, @@ -148,7 +148,7 @@ rate_limiter_process_test_() -> sliding_window_limit => 1, sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000, - leaky_tick_interval_ms => 100000}, + leaky_tick_ms => 100000}, fun peer_cleanup/2}, {#{id => ?TEST_LIMITER, tick_reduction => 1, @@ -157,7 +157,7 @@ rate_limiter_process_test_() -> sliding_window_limit => 0, sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000, - leaky_tick_interval_ms => 100000}, + leaky_tick_ms => 100000}, fun leaky_manual_reduction/2}, {#{id => ?TEST_LIMITER, is_manual_reduction_disabled => true, @@ -167,7 +167,7 @@ rate_limiter_process_test_() -> sliding_window_limit => 0, sliding_window_duration => 1000, timestamp_cleanup_expiry => 1000, - leaky_tick_interval_ms => 100000}, + leaky_tick_ms => 100000}, fun leaky_manual_reduction_disabled/2} ]}. 
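
A minimal, pure-Erlang sketch of the decision order the group tests above pin
down, included as a reading aid between patches. The module and function names
are illustrative only (they are not part of the arweave_limiter API), and the
real gen_server additionally records monitors, timestamps, and token increments
as side effects; this is just the accept/reject logic the assertions imply:
the concurrency cap is checked first, the sliding window is preferred while it
has room, and the leaky bucket is the fallback before a rate_limit rejection.

-module(limiter_decision_sketch).
-export([decide/3]).

%% Peer is any term; Config and State are maps shaped like the values the
%% tests read back via config/1 and info/1.
decide(Peer, Config, State) ->
    #{concurrency_limit := CLim,
      sliding_window_limit := SLim,
      leaky_rate_limit := LLim} = Config,
    Conc   = maps:get(Peer, maps:get(concurrent_requests, State, #{}), []),
    Stamps = maps:get(Peer, maps:get(sliding_timestamps, State, #{}), []),
    Tokens = maps:get(Peer, maps:get(leaky_tokens, State, #{}), 0),
    if
        length(Conc) >= CLim  -> {reject, concurrency};   %% cap checked first
        length(Stamps) < SLim -> {register, sliding};     %% window preferred
        Tokens < LLim         -> {register, leaky};       %% bucket as fallback
        true                  -> {reject, rate_limit}     %% both exhausted
    end.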
From ef2ada9c4ae99a05a54d1d03c153410e8b22a29e Mon Sep 17 00:00:00 2001
From: Kristof Hetzl
Date: Tue, 20 Jan 2026 15:51:15 +0000
Subject: [PATCH 3/7] remove accidentally committed emacs temp file

---
 apps/arweave/test/.#ar_test_node.erl | 1 -
 1 file changed, 1 deletion(-)
 delete mode 120000 apps/arweave/test/.#ar_test_node.erl

diff --git a/apps/arweave/test/.#ar_test_node.erl b/apps/arweave/test/.#ar_test_node.erl
deleted file mode 120000
index 614a2c6f09..0000000000
--- a/apps/arweave/test/.#ar_test_node.erl
+++ /dev/null
@@ -1 +0,0 @@
-kristofhetzl@Kristofs-MacBook-Pro-2.local.1386
\ No newline at end of file

From 12d16052d27bf2b1b69433c1eac2a1e6ffc91d7a Mon Sep 17 00:00:00 2001
From: Kristof Hetzl
Date: Wed, 21 Jan 2026 10:41:53 +0000
Subject: [PATCH 4/7] start arweave_limiter on startup

---
 apps/arweave/src/ar.erl | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/apps/arweave/src/ar.erl b/apps/arweave/src/ar.erl
index 203bbf61b6..5c031c314d 100644
--- a/apps/arweave/src/ar.erl
+++ b/apps/arweave/src/ar.erl
@@ -1322,10 +1322,12 @@ stop(_State) ->
 
 stop_dependencies() ->
 	?LOG_INFO("========== Stopping Arweave Node =========="),
+	application:stop(arweave_limiter),
 	{ok, [_Kernel, _Stdlib, _SASL, _OSMon | Deps]} = application:get_key(arweave, applications),
 	lists:foreach(fun(Dep) -> application:stop(Dep) end, Deps).
 
 start_dependencies() ->
+	ok = arweave_limiter:start(),
 	{ok, _} = application:ensure_all_started(arweave, permanent),
 	ok.

From 5a66a61e9e382206ecff68642083d843f796823f Mon Sep 17 00:00:00 2001
From: Kristof Hetzl
Date: Wed, 21 Jan 2026 14:27:26 +0000
Subject: [PATCH 5/7] chunks limiter config

---
 apps/arweave_config/include/arweave_config.hrl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/apps/arweave_config/include/arweave_config.hrl b/apps/arweave_config/include/arweave_config.hrl
index 3433d401cf..c1f28d0383 100644
--- a/apps/arweave_config/include/arweave_config.hrl
+++ b/apps/arweave_config/include/arweave_config.hrl
@@ -194,8 +194,8 @@
 %% Chunk RLG
 -define(DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_LIMIT, 100).
 -define(DEFAULT_HTTP_API_LIMITER_CHUNK_SLIDING_WINDOW_DURATION, 1000).
--define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_LIMIT, 100).
--define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_INTERVAL, 1000).
+-define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_LIMIT, 6000).
+-define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_INTERVAL, 30000).
 -define(DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_REDUCTION, 30).
 -define(DEFAULT_HTTP_API_LIMITER_CHUNK_CONCURRENCY_LIMIT, 200).
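
A back-of-the-envelope check on the chunk defaults changed in PATCH 5/7,
assuming the tick semantics the group tests exercise (each tick removes
tick_reduction tokens, and requests are rejected once a peer holds leaky_limit
tokens): the patch trades a small, fast-draining bucket (100-token capacity
drained at 30 tokens/s) for a large, slow-draining one (6000-token capacity
drained at 1 token/s), i.e. much larger bursts at a far lower sustained rate.

%% Sustained per-peer chunk rate implied by the new defaults:
%% drained tokens per second = tick_reduction / (tick_interval_ms / 1000).
chunk_sustained_rate() ->
    Reduction = 30,    %% ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_REDUCTION
    TickMs    = 30000, %% new ?DEFAULT_HTTP_API_LIMITER_CHUNK_LEAKY_TICK_INTERVAL
    Reduction / (TickMs / 1000).   %% = 1.0 request/s (was 30.0 at the old 1000 ms tick)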
From 4948de855a6363d189eaeb4510a21145dc790090 Mon Sep 17 00:00:00 2001 From: Kristof Hetzl Date: Wed, 21 Jan 2026 17:37:56 +0000 Subject: [PATCH 6/7] defaults, and more relaxed config params check --- apps/arweave/src/ar_config.erl | 40 +++++++++---------- .../arweave_config/include/arweave_config.hrl | 8 ++-- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/apps/arweave/src/ar_config.erl b/apps/arweave/src/ar_config.erl index aeeb979354..a13531ef0b 100644 --- a/apps/arweave/src/ar_config.erl +++ b/apps/arweave/src/ar_config.erl @@ -902,7 +902,7 @@ parse_options([{<<"http_api.tcp.send_timeout">>, Timeout}|Rest], Config) -> %% RATE LIMITER GENERAL parse_options([{<<"http_api.limiter.general.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.general.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.general.sliding_window_limit'}, Limit} @@ -938,7 +938,7 @@ parse_options([{<<"http_api.limiter.general.sliding_window_timestamp_cleanup_exp parse_options([{<<"http_api.limiter.general.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.general.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.general.leaky_limit'}, Limit} @@ -981,7 +981,7 @@ parse_options([{<<"http_api.limiter.general.is_manual_reduction_disabled">>, IsD %% RATE LIMITER CHUNK parse_options([{<<"http_api.limiter.chunk.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.chunk.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.chunk.sliding_window_limit'}, Limit} @@ -1017,7 +1017,7 @@ parse_options([{<<"http_api.limiter.chunk.sliding_window_timestamp_cleanup_expir parse_options([{<<"http_api.limiter.chunk.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.chunk.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.chunk.leaky_limit'}, Limit} @@ -1060,7 +1060,7 @@ parse_options([{<<"http_api.limiter.chunk.is_manual_reduction_disabled">>, IsDis %% RATE LIMITER DATA_SYNC_RECORD parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.sliding_window_limit'}, Limit} @@ -1096,7 +1096,7 @@ parse_options([{<<"http_api.limiter.data_sync_record.sliding_window_timestamp_cl parse_options([{<<"http_api.limiter.data_sync_record.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.data_sync_record.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.data_sync_record.leaky_limit'}, Limit} @@ -1139,7 +1139,7 @@ parse_options([{<<"http_api.limiter.data_sync_record.is_manual_reduction_disable %% 
RATE LIMITER RECENT_HASH_LIST_DIFF parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.sliding_window_limit'}, Limit} @@ -1175,7 +1175,7 @@ parse_options([{<<"http_api.limiter.recent_hash_list_diff.sliding_window_timesta parse_options([{<<"http_api.limiter.recent_hash_list_diff.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.recent_hash_list_diff.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.recent_hash_list_diff.leaky_limit'}, Limit} @@ -1218,7 +1218,7 @@ parse_options([{<<"http_api.limiter.recent_hash_list_diff.is_manual_reduction_di %% RATE LIMITER BLOCK_INDEX parse_options([{<<"http_api.limiter.block_index.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.block_index.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.block_index.sliding_window_limit'}, Limit} @@ -1254,7 +1254,7 @@ parse_options([{<<"http_api.limiter.block_index.sliding_window_timestamp_cleanup parse_options([{<<"http_api.limiter.block_index.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.block_index.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.block_index.leaky_limit'}, Limit} @@ -1297,7 +1297,7 @@ parse_options([{<<"http_api.limiter.block_index.is_manual_reduction_disabled">>, %% RATE LIMITER WALLET_LIST parse_options([{<<"http_api.limiter.wallet_list.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.sliding_window_limit'}, Limit} @@ -1333,7 +1333,7 @@ parse_options([{<<"http_api.limiter.wallet_list.sliding_window_timestamp_cleanup parse_options([{<<"http_api.limiter.wallet_list.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.wallet_list.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.wallet_list.leaky_limit'}, Limit} @@ -1376,7 +1376,7 @@ parse_options([{<<"http_api.limiter.wallet_list.is_manual_reduction_disabled">>, %% RATE LIMITER GET_VDF parse_options([{<<"http_api.limiter.get_vdf.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.sliding_window_limit'}, Limit} @@ -1412,7 +1412,7 @@ parse_options([{<<"http_api.limiter.get_vdf.sliding_window_timestamp_cleanup_exp 
parse_options([{<<"http_api.limiter.get_vdf.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf.leaky_limit'}, Limit} @@ -1455,7 +1455,7 @@ parse_options([{<<"http_api.limiter.get_vdf.is_manual_reduction_disabled">>, IsD %% RATE LIMITER GET_VDF_SESSION parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.sliding_window_limit'}, Limit} @@ -1491,7 +1491,7 @@ parse_options([{<<"http_api.limiter.get_vdf_session.sliding_window_timestamp_cle parse_options([{<<"http_api.limiter.get_vdf_session.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_vdf_session.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_vdf_session.leaky_limit'}, Limit} @@ -1534,7 +1534,7 @@ parse_options([{<<"http_api.limiter.get_vdf_session.is_manual_reduction_disabled %% RATE LIMITER GET_PREVIOUS_VDF_SESSION parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.sliding_window_limit'}, Limit} @@ -1570,7 +1570,7 @@ parse_options([{<<"http_api.limiter.get_previous_vdf_session.sliding_window_time parse_options([{<<"http_api.limiter.get_previous_vdf_session.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.get_previous_vdf_session.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.get_previous_vdf_session.leaky_limit'}, Limit} @@ -1613,7 +1613,7 @@ parse_options([{<<"http_api.limiter.get_previous_vdf_session.is_manual_reduction %% RATE LIMITER METRICS parse_options([{<<"http_api.limiter.metrics.sliding_window_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.metrics.sliding_window_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.metrics.sliding_window_limit'}, Limit} @@ -1649,7 +1649,7 @@ parse_options([{<<"http_api.limiter.metrics.sliding_window_timestamp_cleanup_exp parse_options([{<<"http_api.limiter.metrics.leaky_limit">>, Limit}|Rest], Config) -> case Limit of - Limit when is_integer(Limit), Limit > 0 -> + Limit when is_integer(Limit), Limit >= 0 -> parse_options(Rest, Config#config{'http_api.limiter.metrics.leaky_limit' = Limit }); _ -> {error, {bad_value, 'http_api.limiter.metrics.leaky_limit'}, Limit} diff --git a/apps/arweave_config/include/arweave_config.hrl b/apps/arweave_config/include/arweave_config.hrl index c1f28d0383..a0fab90647 100644 --- 
a/apps/arweave_config/include/arweave_config.hrl +++ b/apps/arweave_config/include/arweave_config.hrl @@ -184,11 +184,11 @@ -define(DEFAULT_HTTP_API_LIMITER_IS_MANUAL_REDUCTION_DISABLED, false). %% General RLG --define(DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT, 150). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_GENERAL_SLIDING_WINDOW_DURATION, 1000). --define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT, 150). --define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL, 1000). --define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION, 30). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_LIMIT, 450). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_INTERVAL, 30000). +-define(DEFAULT_HTTP_API_LIMITER_GENERAL_LEAKY_TICK_REDUCTION, 450). -define(DEFAULT_HTTP_API_LIMITER_GENERAL_CONCURRENCY_LIMIT, 150). %% Chunk RLG From c0d21906aa510b89fc2aa51a00b43d747073be20 Mon Sep 17 00:00:00 2001 From: Kristof Hetzl Date: Wed, 21 Jan 2026 18:24:07 +0000 Subject: [PATCH 7/7] vdf_session limiting defaults --- apps/arweave_config/include/arweave_config.hrl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/arweave_config/include/arweave_config.hrl b/apps/arweave_config/include/arweave_config.hrl index a0fab90647..d72a3a5d93 100644 --- a/apps/arweave_config/include/arweave_config.hrl +++ b/apps/arweave_config/include/arweave_config.hrl @@ -255,7 +255,7 @@ -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). --define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 4500). +-define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 50000). -else. -define(DEFAULT_HTTP_API_LIMITER_GET_VDF_SESSION_LEAKY_LIMIT, 30). -endif. @@ -267,7 +267,7 @@ -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_LIMIT, 0). -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_SLIDING_WINDOW_DURATION, 1000). -ifdef(AR_TEST). --define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 4500). +-define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 50000). -else. -define(DEFAULT_HTTP_API_LIMITER_GET_PREVIOUS_VDF_SESSION_LEAKY_LIMIT, 30). -endif.
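
A closing note on the validation change in PATCH 6/7: relaxing the
parse_options guards from Limit > 0 to Limit >= 0 makes 0 a legal value, and
the group tests above rely on exactly that (e.g. sliding_window_limit => 0) to
switch one limiting dimension off entirely; the new GENERAL sliding-window
default of 0 does the same in production. A hypothetical restatement of the
relaxed check, with the function name invented for illustration:

%% 0 is accepted (and effectively disables that dimension); negative or
%% non-integer values are still rejected, mirroring the parse_options guards.
valid_limiter_limit(Limit) when is_integer(Limit), Limit >= 0 -> ok;
valid_limiter_limit(Limit) -> {error, {bad_value, Limit}}.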